Compare commits

...

34 Commits

Author SHA1 Message Date
Daniel García
2626e66873 Merge pull request #1069 from jjlin/master
Skip cleanup of `arm32v6` arch-specific tags
2020-07-24 23:05:29 +02:00
Jeremy Lin
81e0e1b339 Skip cleanup of arm32v6 arch-specific tags 2020-07-24 11:32:44 -07:00
Daniel García
fd1354d00e Merge pull request #1067 from jjlin/log-time-fmt
Add config option for log timestamp format
2020-07-24 16:42:10 +02:00
Jeremy Lin
071a3b2a32 Log timestamps with milliseconds by default 2020-07-23 14:19:51 -07:00
Daniel García
32cfaab5ee Updated dependencies and changed rocket request imports 2020-07-23 21:07:04 +02:00
Jeremy Lin
d348f12a0e Add config option for log timestamp format 2020-07-22 21:50:49 -07:00
Daniel García
11845d9f5b Merge pull request #1061 from jjlin/use-strip-prefix
Use `strip_prefix()` instead of `trim_start_matches()` as appropriate
2020-07-21 16:31:31 +02:00
Jeremy Lin
de70fbf88a Use strip_prefix() instead of trim_start_matches() as appropriate
As of Rust 1.45.0, `strip_prefix()` is now stable.
2020-07-20 22:33:13 -07:00
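For illustration, the behavioral difference between the two calls (a standalone sketch, not code from this changeset): `trim_start_matches()` strips repeated matches and cannot signal absence, while `strip_prefix()` strips at most once and returns an `Option`.

```rust
fn main() {
    // trim_start_matches() removes *all* leading occurrences and returns
    // the input unchanged when the pattern is absent.
    assert_eq!("aaab".trim_start_matches('a'), "b");

    // strip_prefix() (stable since Rust 1.45.0) removes the prefix at most
    // once and returns None when it is absent, so callers can distinguish
    // "no prefix present" from "prefix removed".
    assert_eq!("https://example.com".strip_prefix("https://"), Some("example.com"));
    assert_eq!("https://example.com".strip_prefix("http://"), None);
}
```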
Daniel García
0b04caab78 Merge pull request #1029 from jjlin/multi-arch
Multi-arch image support
2020-07-16 22:59:12 +02:00
Jeremy Lin
4c78c5a9c9 Tag latest releases as latest and alpine 2020-07-15 20:03:34 -07:00
Jeremy Lin
73f0841f17 Clean up arch-specific tags if Docker Hub credentials are provided 2020-07-15 20:03:34 -07:00
Jeremy Lin
4559e85daa Multi-arch image support 2020-07-15 20:03:34 -07:00
Jeremy Lin
bbef332e25 Dockerfile.j2: remove dead code 2020-07-15 20:03:34 -07:00
Daniel García
1e950c7dbc Replace IP support in preparation for compiling on stable, included some tests to check that the code matches the unstable implementation 2020-07-15 00:00:03 +02:00
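A hedged sketch of the technique this commit describes: reimplement the nightly-only `Ipv4Addr::is_global()`-style classification on stable Rust, then test the stable code against known cases. The range list below is deliberately partial and illustrative, not the repo's actual code.

```rust
use std::net::Ipv4Addr;

// Stable-Rust approximation of a "globally routable" check. All helper
// methods used here are stable; 100.64.0.0/10 (carrier-grade NAT) has no
// stable std helper, so it is matched manually.
fn is_global_ipv4(ip: &Ipv4Addr) -> bool {
    !(ip.is_private()
        || ip.is_loopback()
        || ip.is_link_local()
        || ip.is_broadcast()
        || ip.is_documentation()
        || ip.is_unspecified()
        // 100.64.0.0/10: first octet 100, top two bits of second octet are 01.
        || (ip.octets()[0] == 100 && (ip.octets()[1] & 0b1100_0000) == 0b0100_0000))
}

#[test]
fn matches_some_known_cases() {
    assert!(is_global_ipv4(&Ipv4Addr::new(1, 1, 1, 1)));
    assert!(!is_global_ipv4(&Ipv4Addr::new(192, 168, 1, 1)));
    assert!(!is_global_ipv4(&Ipv4Addr::new(100, 64, 0, 1)));
}
```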
Daniel García
f14e19a3d8 Don't compile the regexes each time 2020-07-14 21:58:27 +02:00
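The usual pattern for compiling a regex once is a `Lazy` static from `once_cell`, which is already a dependency of this project. A minimal sketch; the pattern and call site are placeholders, not one of the repo's actual regexes.

```rust
use once_cell::sync::Lazy;
use regex::Regex;

// Compiled on first use and cached for the lifetime of the process,
// instead of calling Regex::new() on every request.
static UUID_RE: Lazy<Regex> = Lazy::new(|| {
    Regex::new(r"^[0-9a-f]{8}-([0-9a-f]{4}-){3}[0-9a-f]{12}$").expect("valid regex")
});

fn looks_like_uuid(s: &str) -> bool {
    UUID_RE.is_match(s)
}
```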
Daniel García
668d5c23dc Removed try_trait and some formatting, particularly around imports 2020-07-14 18:34:22 +02:00
Daniel García
fb6f96f5c3 Updated dependencies 2020-07-14 16:08:11 +02:00
Daniel García
6e6e34ff18 Merge pull request #1055 from jjlin/pg
Fix error in PostgreSQL build
2020-07-11 11:17:45 +02:00
Jeremy Lin
790146bfac Fix error in PostgreSQL build 2020-07-10 17:23:02 -07:00
Daniel García
af625930d6 Merge pull request #1049 from jjlin/local-tz
Use local time in email notifications for new device logins
2020-07-08 19:39:17 +02:00
Jeremy Lin
a28ebcb401 Use local time in email notifications for new device logins
In this implementation, the `TZ` environment variable must be set
in order for the formatted output to use a more user-friendly
time zone abbreviation (e.g., `UTC`). Otherwise, the output uses
the time zone's UTC offset (e.g., `+00:00`).
2020-07-07 21:30:18 -07:00
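A hedged sketch of the described behavior using the `chrono` and `chrono-tz` crates this changeset adds; the repo's actual helper may differ. With `TZ` set (e.g. `TZ=Europe/Berlin`), `%Z` renders an abbreviation such as "CEST"; without it, only the UTC offset via `%:z` is available.

```rust
use chrono::Utc;
use chrono_tz::Tz;

fn format_login_time() -> String {
    let now = Utc::now();
    match std::env::var("TZ").ok().and_then(|tz| tz.parse::<Tz>().ok()) {
        // TZ names a valid IANA zone: convert and print a friendly
        // abbreviation like "UTC" or "CEST".
        Some(tz) => now.with_timezone(&tz).format("%Y-%m-%d %H:%M:%S %Z").to_string(),
        // No usable TZ: fall back to the numeric offset, e.g. "+00:00".
        None => now.format("%Y-%m-%d %H:%M:%S %:z").to_string(),
    }
}
```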
Daniel García
77e47ddd1f Merge pull request #1042 from jjlin/hide-passwords
Add support for hiding passwords in a collection
2020-07-06 18:56:06 +02:00
Daniel García
5b620ba6cd Merge pull request #1048 from jjlin/init
Add startup script to support init operations
2020-07-06 18:15:18 +02:00
Jeremy Lin
d5f9b33f66 Add startup script to support init operations
This is useful for making local customizations upon container start. To use
this feature, mount a script into the container as `/etc/bitwarden_rs.sh`
and/or a directory of scripts as `/etc/bitwarden_rs.d`. In the latter case,
only files with an `.sh` extension are sourced, so files with other
extensions (e.g., data/config files) can reside in the same dir.

Note that the init scripts are run each time the container starts (not just
the first time), so these scripts should be idempotent.
2020-07-05 15:26:20 -07:00
Daniel García
596c9b8691 Add option to set name during HELO in email settings 2020-07-05 01:59:15 +02:00
Daniel García
d4357eb55a Updated dependencies and web vault version 2020-07-05 01:38:16 +02:00
Daniel García
b37f0dfde3 Merge pull request #1044 from ArmaanT/master
Allow postgres:// in DATABASE_URL
2020-07-05 01:07:29 +02:00
Armaan Tobaccowalla
624791e09a Allow postgres:// DATABASE_URL 2020-07-04 16:13:27 -04:00
Jeremy Lin
f9a73a9bbe More cipher optimization/cleanup 2020-07-03 10:49:10 -07:00
Jeremy Lin
35868dd72c Optimize cipher queries 2020-07-03 09:00:33 -07:00
Jeremy Lin
979d010dc2 Add support for hiding passwords in a collection
Ref: https://github.com/bitwarden/server/pull/743
2020-07-02 21:51:20 -07:00
Daniel García
b34d548246 Update dependencies 2020-06-22 17:15:20 +02:00
Daniel García
a87646b8cb Some format changes to main.rs 2020-06-15 23:40:39 +02:00
Daniel García
a2411eef56 Updated dependencies 2020-06-15 23:04:52 +02:00
60 changed files with 1276 additions and 820 deletions

View File

@@ -44,6 +44,10 @@
 ## Enable extended logging, which shows timestamps and targets in the logs
 # EXTENDED_LOGGING=true
+## Timestamp format used in extended logging.
+## Format specifiers: https://docs.rs/chrono/latest/chrono/format/strftime
+# LOG_TIMESTAMP_FORMAT="%Y-%m-%d %H:%M:%S.%3f"
+
 ## Logging to file
 ## It's recommended to also set 'ROCKET_CLI_COLORS=off'
 # LOG_FILE=/path/to/log
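The new option feeds straight into chrono's strftime-style formatter; a minimal sketch (the log-line layout is illustrative):

```rust
use chrono::Local;

fn main() {
    // The default format keeps millisecond precision via the `%3f`
    // fractional-seconds specifier.
    let ts = Local::now().format("%Y-%m-%d %H:%M:%S.%3f");
    println!("[{}][INFO] example log line", ts);
}
```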

737
Cargo.lock generated

File diff suppressed because it is too large

View File

@@ -17,6 +17,10 @@ mysql = ["diesel/mysql", "diesel_migrations/mysql"]
 postgresql = ["diesel/postgres", "diesel_migrations/postgres"]
 sqlite = ["diesel/sqlite", "diesel_migrations/sqlite", "libsqlite3-sys"]
+# Enable unstable features, requires nightly
+# Currently only used to enable rusts official ip support
+unstable = []
 [target."cfg(not(windows))".dependencies]
 syslog = "4.0.1"
@@ -29,7 +33,7 @@ rocket_contrib = "0.5.0-dev"
 reqwest = { version = "0.10.6", features = ["blocking", "json"] }
 # multipart/form-data support
-multipart = { version = "0.16.1", features = ["server"], default-features = false }
+multipart = { version = "0.17.0", features = ["server"], default-features = false }
 # WebSockets library
 ws = "0.9.1"
@@ -41,29 +45,30 @@ rmpv = "0.4.4"
 chashmap = "2.2.2"
 # A generic serialization/deserialization framework
-serde = "1.0.111"
-serde_derive = "1.0.111"
-serde_json = "1.0.53"
+serde = "1.0.114"
+serde_derive = "1.0.114"
+serde_json = "1.0.56"
 # Logging
-log = "0.4.8"
+log = "0.4.11"
 fern = { version = "0.6.0", features = ["syslog-4"] }
 # A safe, extensible ORM and Query builder
-diesel = { version = "1.4.4", features = [ "chrono", "r2d2"] }
+diesel = { version = "1.4.5", features = [ "chrono", "r2d2"] }
 diesel_migrations = "1.4.0"
 # Bundled SQLite
-libsqlite3-sys = { version = "0.17.3", features = ["bundled"], optional = true }
+libsqlite3-sys = { version = "0.18.0", features = ["bundled"], optional = true }
 # Crypto library
-ring = "0.16.14"
+ring = "0.16.15"
 # UUID generation
 uuid = { version = "0.8.1", features = ["v4"] }
-# Date and time librar for Rust
-chrono = "0.4.11"
+# Date and time libraries
+chrono = "0.4.13"
+chrono-tz = "0.5.2"
 time = "0.2.16"
 # TOTP library
@@ -73,13 +78,13 @@ oath = "0.10.2"
 data-encoding = "2.2.1"
 # JWT library
-jsonwebtoken = "7.1.0"
+jsonwebtoken = "7.2.0"
 # U2F library
 u2f = "0.2.0"
 # Yubico Library
-yubico = { version = "0.9.0", features = ["online-tokio"], default-features = false }
+yubico = { version = "0.9.1", features = ["online-tokio"], default-features = false }
 # A `dotenv` implementation for Rust
 dotenv = { version = "0.15.0", default-features = false }
@@ -88,15 +93,15 @@ dotenv = { version = "0.15.0", default-features = false }
 once_cell = "1.4.0"
 # Numerical libraries
-num-traits = "0.2.11"
+num-traits = "0.2.12"
 num-derive = "0.3.0"
 # Email libraries
-lettre = { version = "0.10.0-alpha.1", features = ["smtp-transport", "builder", "serde", "native-tls"], default-features = false }
+lettre = { version = "0.10.0-alpha.1", features = ["smtp-transport", "builder", "serde", "native-tls", "hostname"], default-features = false }
 native-tls = "0.2.4"
 # Template library
-handlebars = { version = "3.0.1", features = ["dir_source"] }
+handlebars = { version = "3.3.0", features = ["dir_source"] }
 # For favicon extraction from main website
 soup = "0.5.0"
@@ -104,7 +109,7 @@ regex = "1.3.9"
 data-url = "0.1.0"
 # Used by U2F, JWT and Postgres
-openssl = "0.10.29"
+openssl = "0.10.30"
 # URL encoding library
 percent-encoding = "2.1.0"
@@ -112,10 +117,10 @@ percent-encoding = "2.1.0"
 idna = "0.2.0"
 # CLI argument parsing
-structopt = "0.3.14"
+structopt = "0.3.15"
 # Logging panics to logfile instead stderr only
-backtrace = "0.3.48"
+backtrace = "0.3.50"
 [patch.crates-io]
 # Use newest ring

View File

@@ -9,13 +9,13 @@
 {% elif "amd64" in target_file %}
 {% set runtime_stage_base_image = "debian:buster-slim" %}
 {% set package_arch_name = "" %}
-{% elif "aarch64" in target_file %}
+{% elif "arm64v8" in target_file %}
 {% set runtime_stage_base_image = "balenalib/aarch64-debian:buster" %}
 {% set package_arch_name = "arm64" %}
-{% elif "armv6" in target_file %}
+{% elif "arm32v6" in target_file %}
 {% set runtime_stage_base_image = "balenalib/rpi-debian:buster" %}
 {% set package_arch_name = "armel" %}
-{% elif "armv7" in target_file %}
+{% elif "arm32v7" in target_file %}
 {% set runtime_stage_base_image = "balenalib/armv7hf-debian:buster" %}
 {% set package_arch_name = "armhf" %}
 {% endif %}
@@ -27,17 +27,17 @@
 # https://docs.docker.com/develop/develop-images/multistage-build/
 # https://whitfin.io/speeding-up-rust-docker-builds/
 ####################### VAULT BUILD IMAGE #######################
-{% set vault_image_hash = "sha256:c62e0b8698562e03fe2759374f3ecd760d9612e4eaf0af4583f231ebf05d6df5" %}
+{% set vault_image_hash = "sha256:afba1e3bded09dc0a6a0dbacb3363ac33b6f122b4b26d3682cafb9115bdf785c" %}
 {% raw %}
 # This hash is extracted from the docker web-vault builds and it's prefered over a simple tag because it's immutable.
 # It can be viewed in multiple ways:
 # - From the https://hub.docker.com/repository/docker/bitwardenrs/web-vault/tags page, click the tag name and the digest should be there.
 # - From the console, with the following commands:
-# docker pull bitwardenrs/web-vault:v2.14.0
-# docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.14.0
+# docker pull bitwardenrs/web-vault:v2.15.1
+# docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.15.1
 #
 # - To do the opposite, and get the tag from the hash, you can do:
-# docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:c62e0b8698562e03fe2759374f3ecd760d9612e4eaf0af4583f231ebf05d6df5
+# docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:afba1e3bded09dc0a6a0dbacb3363ac33b6f122b4b26d3682cafb9115bdf785c
 {% endraw %}
 FROM bitwardenrs/web-vault@{{ vault_image_hash }} as vault
@@ -73,7 +73,7 @@ RUN rustup set profile minimal
 ENV USER "root"
 ENV RUSTFLAGS='-C link-arg=-s'
-{% elif "aarch64" in target_file or "armv" in target_file %}
+{% elif "arm32" in target_file or "arm64" in target_file %}
 # Install required build libs for {{ package_arch_name }} architecture.
 RUN sed 's/^deb/deb-src/' /etc/apt/sources.list > \
     /etc/apt/sources.list.d/deb-src.list \
@@ -85,7 +85,7 @@ RUN sed 's/^deb/deb-src/' /etc/apt/sources.list > \
     libc6-dev{{ package_arch_prefix }}
 {% endif -%}
-{% if "aarch64" in target_file %}
+{% if "arm64v8" in target_file %}
 RUN apt-get update \
     && apt-get install -y \
     --no-install-recommends \
@@ -97,7 +97,7 @@ RUN apt-get update \
 ENV CARGO_HOME "/root/.cargo"
 ENV USER "root"
-{% elif "armv6" in target_file %}
+{% elif "arm32v6" in target_file %}
 RUN apt-get update \
     && apt-get install -y \
     --no-install-recommends \
@@ -109,19 +109,7 @@ RUN apt-get update \
 ENV CARGO_HOME "/root/.cargo"
 ENV USER "root"
-{% elif "armv6" in target_file %}
-RUN apt-get update \
-    && apt-get install -y \
-    --no-install-recommends \
-    gcc-arm-linux-gnueabihf \
-    && mkdir -p ~/.cargo \
-    && echo '[target.armv7-unknown-linux-gnueabihf]' >> ~/.cargo/config \
-    && echo 'linker = "arm-linux-gnueabihf-gcc"' >> ~/.cargo/config
-ENV CARGO_HOME "/root/.cargo"
-ENV USER "root"
-{% elif "armv7" in target_file %}
+{% elif "arm32v7" in target_file %}
 RUN apt-get update \
     && apt-get install -y \
     --no-install-recommends \
@@ -162,17 +150,17 @@ COPY ./Cargo.* ./
 COPY ./rust-toolchain ./rust-toolchain
 COPY ./build.rs ./build.rs
-{% if "aarch64" in target_file %}
+{% if "arm64v8" in target_file %}
 ENV CC_aarch64_unknown_linux_gnu="/usr/bin/aarch64-linux-gnu-gcc"
 ENV CROSS_COMPILE="1"
 ENV OPENSSL_INCLUDE_DIR="/usr/include/aarch64-linux-gnu"
 ENV OPENSSL_LIB_DIR="/usr/lib/aarch64-linux-gnu"
-{% elif "armv6" in target_file %}
+{% elif "arm32v6" in target_file %}
 ENV CC_arm_unknown_linux_gnueabi="/usr/bin/arm-linux-gnueabi-gcc"
 ENV CROSS_COMPILE="1"
 ENV OPENSSL_INCLUDE_DIR="/usr/include/arm-linux-gnueabi"
 ENV OPENSSL_LIB_DIR="/usr/lib/arm-linux-gnueabi"
-{% elif "armv7" in target_file %}
+{% elif "arm32v7" in target_file %}
 ENV CC_armv7_unknown_linux_gnueabihf="/usr/bin/arm-linux-gnueabihf-gcc"
 ENV CROSS_COMPILE="1"
 ENV OPENSSL_INCLUDE_DIR="/usr/include/arm-linux-gnueabihf"
@@ -182,13 +170,13 @@ ENV OPENSSL_LIB_DIR="/usr/lib/arm-linux-gnueabihf"
 {% if "alpine" in target_file %}
 RUN rustup target add x86_64-unknown-linux-musl
-{% elif "aarch64" in target_file %}
+{% elif "arm64v8" in target_file %}
 RUN rustup target add aarch64-unknown-linux-gnu
-{% elif "armv6" in target_file %}
+{% elif "arm32v6" in target_file %}
 RUN rustup target add arm-unknown-linux-gnueabi
-{% elif "armv7" in target_file %}
+{% elif "arm32v7" in target_file %}
 RUN rustup target add armv7-unknown-linux-gnueabihf
 {% endif %}
 # Builds your dependencies and removes the
@@ -208,11 +196,11 @@ RUN touch src/main.rs
 # your actual source files being built
 {% if "amd64" in target_file %}
 RUN cargo build --features ${DB} --release
-{% elif "aarch64" in target_file %}
+{% elif "arm64v8" in target_file %}
 RUN cargo build --features ${DB} --release --target=aarch64-unknown-linux-gnu
-{% elif "armv6" in target_file %}
+{% elif "arm32v6" in target_file %}
 RUN cargo build --features ${DB} --release --target=arm-unknown-linux-gnueabi
-{% elif "armv7" in target_file %}
+{% elif "arm32v7" in target_file %}
 RUN cargo build --features ${DB} --release --target=armv7-unknown-linux-gnueabihf
 {% endif %}
@@ -277,20 +265,21 @@ COPY Rocket.toml .
 COPY --from=vault /web-vault ./web-vault
 {% if "alpine" in target_file %}
 COPY --from=build /app/target/x86_64-unknown-linux-musl/release/bitwarden_rs .
-{% elif "aarch64" in target_file %}
+{% elif "arm64v8" in target_file %}
 COPY --from=build /app/target/aarch64-unknown-linux-gnu/release/bitwarden_rs .
-{% elif "armv6" in target_file %}
+{% elif "arm32v6" in target_file %}
 COPY --from=build /app/target/arm-unknown-linux-gnueabi/release/bitwarden_rs .
-{% elif "armv7" in target_file %}
+{% elif "arm32v7" in target_file %}
 COPY --from=build /app/target/armv7-unknown-linux-gnueabihf/release/bitwarden_rs .
 {% else %}
 COPY --from=build app/target/release/bitwarden_rs .
 {% endif %}
 COPY docker/healthcheck.sh /healthcheck.sh
+COPY docker/start.sh /start.sh
 HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]
 # Configures the startup!
 WORKDIR /
-CMD ["/bitwarden_rs"]
+CMD ["/start.sh"]

3
docker/README.md Normal file
View File

@@ -0,0 +1,3 @@
The arch-specific directory names follow the arch identifiers used by the Docker official images:
https://github.com/docker-library/official-images/blob/master/README.md#architectures-other-than-amd64

View File

@@ -10,12 +10,12 @@
 # It can be viewed in multiple ways:
 # - From the https://hub.docker.com/repository/docker/bitwardenrs/web-vault/tags page, click the tag name and the digest should be there.
 # - From the console, with the following commands:
-# docker pull bitwardenrs/web-vault:v2.14.0
-# docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.14.0
+# docker pull bitwardenrs/web-vault:v2.15.1
+# docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.15.1
 #
 # - To do the opposite, and get the tag from the hash, you can do:
-# docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:c62e0b8698562e03fe2759374f3ecd760d9612e4eaf0af4583f231ebf05d6df5
+# docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:afba1e3bded09dc0a6a0dbacb3363ac33b6f122b4b26d3682cafb9115bdf785c
-FROM bitwardenrs/web-vault@sha256:c62e0b8698562e03fe2759374f3ecd760d9612e4eaf0af4583f231ebf05d6df5 as vault
+FROM bitwardenrs/web-vault@sha256:afba1e3bded09dc0a6a0dbacb3363ac33b6f122b4b26d3682cafb9115bdf785c as vault
 ########################## BUILD IMAGE ##########################
 # We need to use the Rust build image, because
@@ -93,9 +93,10 @@ COPY --from=vault /web-vault ./web-vault
 COPY --from=build app/target/release/bitwarden_rs .
 COPY docker/healthcheck.sh /healthcheck.sh
+COPY docker/start.sh /start.sh
 HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]
 # Configures the startup!
 WORKDIR /
-CMD ["/bitwarden_rs"]
+CMD ["/start.sh"]

View File

@@ -10,12 +10,12 @@
 # It can be viewed in multiple ways:
 # - From the https://hub.docker.com/repository/docker/bitwardenrs/web-vault/tags page, click the tag name and the digest should be there.
 # - From the console, with the following commands:
-# docker pull bitwardenrs/web-vault:v2.14.0
-# docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.14.0
+# docker pull bitwardenrs/web-vault:v2.15.1
+# docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.15.1
 #
 # - To do the opposite, and get the tag from the hash, you can do:
-# docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:c62e0b8698562e03fe2759374f3ecd760d9612e4eaf0af4583f231ebf05d6df5
+# docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:afba1e3bded09dc0a6a0dbacb3363ac33b6f122b4b26d3682cafb9115bdf785c
-FROM bitwardenrs/web-vault@sha256:c62e0b8698562e03fe2759374f3ecd760d9612e4eaf0af4583f231ebf05d6df5 as vault
+FROM bitwardenrs/web-vault@sha256:afba1e3bded09dc0a6a0dbacb3363ac33b6f122b4b26d3682cafb9115bdf785c as vault
 ########################## BUILD IMAGE ##########################
 # Musl build image for statically compiled binary
@@ -96,9 +96,10 @@ COPY --from=vault /web-vault ./web-vault
 COPY --from=build /app/target/x86_64-unknown-linux-musl/release/bitwarden_rs .
 COPY docker/healthcheck.sh /healthcheck.sh
+COPY docker/start.sh /start.sh
 HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]
 # Configures the startup!
 WORKDIR /
-CMD ["/bitwarden_rs"]
+CMD ["/start.sh"]

View File

@@ -10,12 +10,12 @@
 # It can be viewed in multiple ways:
 # - From the https://hub.docker.com/repository/docker/bitwardenrs/web-vault/tags page, click the tag name and the digest should be there.
 # - From the console, with the following commands:
-# docker pull bitwardenrs/web-vault:v2.14.0
-# docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.14.0
+# docker pull bitwardenrs/web-vault:v2.15.1
+# docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.15.1
 #
 # - To do the opposite, and get the tag from the hash, you can do:
-# docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:c62e0b8698562e03fe2759374f3ecd760d9612e4eaf0af4583f231ebf05d6df5
+# docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:afba1e3bded09dc0a6a0dbacb3363ac33b6f122b4b26d3682cafb9115bdf785c
-FROM bitwardenrs/web-vault@sha256:c62e0b8698562e03fe2759374f3ecd760d9612e4eaf0af4583f231ebf05d6df5 as vault
+FROM bitwardenrs/web-vault@sha256:afba1e3bded09dc0a6a0dbacb3363ac33b6f122b4b26d3682cafb9115bdf785c as vault
 ########################## BUILD IMAGE ##########################
 # We need to use the Rust build image, because
@@ -93,9 +93,10 @@ COPY --from=vault /web-vault ./web-vault
 COPY --from=build app/target/release/bitwarden_rs .
 COPY docker/healthcheck.sh /healthcheck.sh
+COPY docker/start.sh /start.sh
 HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]
 # Configures the startup!
 WORKDIR /
-CMD ["/bitwarden_rs"]
+CMD ["/start.sh"]

View File

@@ -10,12 +10,12 @@
 # It can be viewed in multiple ways:
 # - From the https://hub.docker.com/repository/docker/bitwardenrs/web-vault/tags page, click the tag name and the digest should be there.
 # - From the console, with the following commands:
-# docker pull bitwardenrs/web-vault:v2.14.0
-# docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.14.0
+# docker pull bitwardenrs/web-vault:v2.15.1
+# docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.15.1
 #
 # - To do the opposite, and get the tag from the hash, you can do:
-# docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:c62e0b8698562e03fe2759374f3ecd760d9612e4eaf0af4583f231ebf05d6df5
+# docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:afba1e3bded09dc0a6a0dbacb3363ac33b6f122b4b26d3682cafb9115bdf785c
-FROM bitwardenrs/web-vault@sha256:c62e0b8698562e03fe2759374f3ecd760d9612e4eaf0af4583f231ebf05d6df5 as vault
+FROM bitwardenrs/web-vault@sha256:afba1e3bded09dc0a6a0dbacb3363ac33b6f122b4b26d3682cafb9115bdf785c as vault
 ########################## BUILD IMAGE ##########################
 # Musl build image for statically compiled binary
@@ -96,9 +96,10 @@ COPY --from=vault /web-vault ./web-vault
 COPY --from=build /app/target/x86_64-unknown-linux-musl/release/bitwarden_rs .
 COPY docker/healthcheck.sh /healthcheck.sh
+COPY docker/start.sh /start.sh
 HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]
 # Configures the startup!
 WORKDIR /
-CMD ["/bitwarden_rs"]
+CMD ["/start.sh"]

View File

@@ -10,12 +10,12 @@
 # It can be viewed in multiple ways:
 # - From the https://hub.docker.com/repository/docker/bitwardenrs/web-vault/tags page, click the tag name and the digest should be there.
 # - From the console, with the following commands:
-# docker pull bitwardenrs/web-vault:v2.14.0
-# docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.14.0
+# docker pull bitwardenrs/web-vault:v2.15.1
+# docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.15.1
 #
 # - To do the opposite, and get the tag from the hash, you can do:
-# docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:c62e0b8698562e03fe2759374f3ecd760d9612e4eaf0af4583f231ebf05d6df5
+# docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:afba1e3bded09dc0a6a0dbacb3363ac33b6f122b4b26d3682cafb9115bdf785c
-FROM bitwardenrs/web-vault@sha256:c62e0b8698562e03fe2759374f3ecd760d9612e4eaf0af4583f231ebf05d6df5 as vault
+FROM bitwardenrs/web-vault@sha256:afba1e3bded09dc0a6a0dbacb3363ac33b6f122b4b26d3682cafb9115bdf785c as vault
 ########################## BUILD IMAGE ##########################
 # We need to use the Rust build image, because
@@ -87,9 +87,10 @@ COPY --from=vault /web-vault ./web-vault
 COPY --from=build app/target/release/bitwarden_rs .
 COPY docker/healthcheck.sh /healthcheck.sh
+COPY docker/start.sh /start.sh
 HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]
 # Configures the startup!
 WORKDIR /
-CMD ["/bitwarden_rs"]
+CMD ["/start.sh"]

View File

@@ -10,12 +10,12 @@
 # It can be viewed in multiple ways:
 # - From the https://hub.docker.com/repository/docker/bitwardenrs/web-vault/tags page, click the tag name and the digest should be there.
 # - From the console, with the following commands:
-# docker pull bitwardenrs/web-vault:v2.14.0
-# docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.14.0
+# docker pull bitwardenrs/web-vault:v2.15.1
+# docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.15.1
 #
 # - To do the opposite, and get the tag from the hash, you can do:
-# docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:c62e0b8698562e03fe2759374f3ecd760d9612e4eaf0af4583f231ebf05d6df5
+# docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:afba1e3bded09dc0a6a0dbacb3363ac33b6f122b4b26d3682cafb9115bdf785c
-FROM bitwardenrs/web-vault@sha256:c62e0b8698562e03fe2759374f3ecd760d9612e4eaf0af4583f231ebf05d6df5 as vault
+FROM bitwardenrs/web-vault@sha256:afba1e3bded09dc0a6a0dbacb3363ac33b6f122b4b26d3682cafb9115bdf785c as vault
 ########################## BUILD IMAGE ##########################
 # Musl build image for statically compiled binary
@@ -90,9 +90,10 @@ COPY --from=vault /web-vault ./web-vault
 COPY --from=build /app/target/x86_64-unknown-linux-musl/release/bitwarden_rs .
 COPY docker/healthcheck.sh /healthcheck.sh
+COPY docker/start.sh /start.sh
 HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]
 # Configures the startup!
 WORKDIR /
-CMD ["/bitwarden_rs"]
+CMD ["/start.sh"]

View File

@@ -10,12 +10,12 @@
 # It can be viewed in multiple ways:
 # - From the https://hub.docker.com/repository/docker/bitwardenrs/web-vault/tags page, click the tag name and the digest should be there.
 # - From the console, with the following commands:
-# docker pull bitwardenrs/web-vault:v2.14.0
-# docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.14.0
+# docker pull bitwardenrs/web-vault:v2.15.1
+# docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.15.1
 #
 # - To do the opposite, and get the tag from the hash, you can do:
-# docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:c62e0b8698562e03fe2759374f3ecd760d9612e4eaf0af4583f231ebf05d6df5
+# docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:afba1e3bded09dc0a6a0dbacb3363ac33b6f122b4b26d3682cafb9115bdf785c
-FROM bitwardenrs/web-vault@sha256:c62e0b8698562e03fe2759374f3ecd760d9612e4eaf0af4583f231ebf05d6df5 as vault
+FROM bitwardenrs/web-vault@sha256:afba1e3bded09dc0a6a0dbacb3363ac33b6f122b4b26d3682cafb9115bdf785c as vault
 ########################## BUILD IMAGE ##########################
 # We need to use the Rust build image, because
@@ -125,9 +125,10 @@ COPY --from=vault /web-vault ./web-vault
 COPY --from=build /app/target/arm-unknown-linux-gnueabi/release/bitwarden_rs .
 COPY docker/healthcheck.sh /healthcheck.sh
+COPY docker/start.sh /start.sh
 HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]
 # Configures the startup!
 WORKDIR /
-CMD ["/bitwarden_rs"]
+CMD ["/start.sh"]

View File

@@ -10,12 +10,12 @@
 # It can be viewed in multiple ways:
 # - From the https://hub.docker.com/repository/docker/bitwardenrs/web-vault/tags page, click the tag name and the digest should be there.
 # - From the console, with the following commands:
-# docker pull bitwardenrs/web-vault:v2.14.0
-# docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.14.0
+# docker pull bitwardenrs/web-vault:v2.15.1
+# docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.15.1
 #
 # - To do the opposite, and get the tag from the hash, you can do:
-# docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:c62e0b8698562e03fe2759374f3ecd760d9612e4eaf0af4583f231ebf05d6df5
+# docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:afba1e3bded09dc0a6a0dbacb3363ac33b6f122b4b26d3682cafb9115bdf785c
-FROM bitwardenrs/web-vault@sha256:c62e0b8698562e03fe2759374f3ecd760d9612e4eaf0af4583f231ebf05d6df5 as vault
+FROM bitwardenrs/web-vault@sha256:afba1e3bded09dc0a6a0dbacb3363ac33b6f122b4b26d3682cafb9115bdf785c as vault
 ########################## BUILD IMAGE ##########################
 # We need to use the Rust build image, because
@@ -119,9 +119,10 @@ COPY --from=vault /web-vault ./web-vault
 COPY --from=build /app/target/arm-unknown-linux-gnueabi/release/bitwarden_rs .
 COPY docker/healthcheck.sh /healthcheck.sh
+COPY docker/start.sh /start.sh
 HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]
 # Configures the startup!
 WORKDIR /
-CMD ["/bitwarden_rs"]
+CMD ["/start.sh"]

View File

@@ -10,12 +10,12 @@
 # It can be viewed in multiple ways:
 # - From the https://hub.docker.com/repository/docker/bitwardenrs/web-vault/tags page, click the tag name and the digest should be there.
 # - From the console, with the following commands:
-# docker pull bitwardenrs/web-vault:v2.14.0
-# docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.14.0
+# docker pull bitwardenrs/web-vault:v2.15.1
+# docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.15.1
 #
 # - To do the opposite, and get the tag from the hash, you can do:
-# docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:c62e0b8698562e03fe2759374f3ecd760d9612e4eaf0af4583f231ebf05d6df5
+# docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:afba1e3bded09dc0a6a0dbacb3363ac33b6f122b4b26d3682cafb9115bdf785c
-FROM bitwardenrs/web-vault@sha256:c62e0b8698562e03fe2759374f3ecd760d9612e4eaf0af4583f231ebf05d6df5 as vault
+FROM bitwardenrs/web-vault@sha256:afba1e3bded09dc0a6a0dbacb3363ac33b6f122b4b26d3682cafb9115bdf785c as vault
 ########################## BUILD IMAGE ##########################
 # We need to use the Rust build image, because
@@ -124,9 +124,10 @@ COPY --from=vault /web-vault ./web-vault
 COPY --from=build /app/target/armv7-unknown-linux-gnueabihf/release/bitwarden_rs .
 COPY docker/healthcheck.sh /healthcheck.sh
+COPY docker/start.sh /start.sh
 HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]
 # Configures the startup!
 WORKDIR /
-CMD ["/bitwarden_rs"]
+CMD ["/start.sh"]

View File

@@ -10,12 +10,12 @@
 # It can be viewed in multiple ways:
 # - From the https://hub.docker.com/repository/docker/bitwardenrs/web-vault/tags page, click the tag name and the digest should be there.
 # - From the console, with the following commands:
-# docker pull bitwardenrs/web-vault:v2.14.0
-# docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.14.0
+# docker pull bitwardenrs/web-vault:v2.15.1
+# docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.15.1
 #
 # - To do the opposite, and get the tag from the hash, you can do:
-# docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:c62e0b8698562e03fe2759374f3ecd760d9612e4eaf0af4583f231ebf05d6df5
+# docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:afba1e3bded09dc0a6a0dbacb3363ac33b6f122b4b26d3682cafb9115bdf785c
-FROM bitwardenrs/web-vault@sha256:c62e0b8698562e03fe2759374f3ecd760d9612e4eaf0af4583f231ebf05d6df5 as vault
+FROM bitwardenrs/web-vault@sha256:afba1e3bded09dc0a6a0dbacb3363ac33b6f122b4b26d3682cafb9115bdf785c as vault
 ########################## BUILD IMAGE ##########################
 # We need to use the Rust build image, because
@@ -118,9 +118,10 @@ COPY --from=vault /web-vault ./web-vault
 COPY --from=build /app/target/armv7-unknown-linux-gnueabihf/release/bitwarden_rs .
 COPY docker/healthcheck.sh /healthcheck.sh
+COPY docker/start.sh /start.sh
 HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]
 # Configures the startup!
 WORKDIR /
-CMD ["/bitwarden_rs"]
+CMD ["/start.sh"]

View File

@@ -10,12 +10,12 @@
 # It can be viewed in multiple ways:
 # - From the https://hub.docker.com/repository/docker/bitwardenrs/web-vault/tags page, click the tag name and the digest should be there.
 # - From the console, with the following commands:
-# docker pull bitwardenrs/web-vault:v2.14.0
-# docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.14.0
+# docker pull bitwardenrs/web-vault:v2.15.1
+# docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.15.1
 #
 # - To do the opposite, and get the tag from the hash, you can do:
-# docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:c62e0b8698562e03fe2759374f3ecd760d9612e4eaf0af4583f231ebf05d6df5
+# docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:afba1e3bded09dc0a6a0dbacb3363ac33b6f122b4b26d3682cafb9115bdf785c
-FROM bitwardenrs/web-vault@sha256:c62e0b8698562e03fe2759374f3ecd760d9612e4eaf0af4583f231ebf05d6df5 as vault
+FROM bitwardenrs/web-vault@sha256:afba1e3bded09dc0a6a0dbacb3363ac33b6f122b4b26d3682cafb9115bdf785c as vault
 ########################## BUILD IMAGE ##########################
 # We need to use the Rust build image, because
@@ -125,9 +125,10 @@ COPY --from=vault /web-vault ./web-vault
 COPY --from=build /app/target/aarch64-unknown-linux-gnu/release/bitwarden_rs .
 COPY docker/healthcheck.sh /healthcheck.sh
+COPY docker/start.sh /start.sh
 HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]
 # Configures the startup!
 WORKDIR /
-CMD ["/bitwarden_rs"]
+CMD ["/start.sh"]

View File

@@ -10,12 +10,12 @@
 # It can be viewed in multiple ways:
 # - From the https://hub.docker.com/repository/docker/bitwardenrs/web-vault/tags page, click the tag name and the digest should be there.
 # - From the console, with the following commands:
-# docker pull bitwardenrs/web-vault:v2.14.0
-# docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.14.0
+# docker pull bitwardenrs/web-vault:v2.15.1
+# docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.15.1
 #
 # - To do the opposite, and get the tag from the hash, you can do:
-# docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:c62e0b8698562e03fe2759374f3ecd760d9612e4eaf0af4583f231ebf05d6df5
+# docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:afba1e3bded09dc0a6a0dbacb3363ac33b6f122b4b26d3682cafb9115bdf785c
-FROM bitwardenrs/web-vault@sha256:c62e0b8698562e03fe2759374f3ecd760d9612e4eaf0af4583f231ebf05d6df5 as vault
+FROM bitwardenrs/web-vault@sha256:afba1e3bded09dc0a6a0dbacb3363ac33b6f122b4b26d3682cafb9115bdf785c as vault
 ########################## BUILD IMAGE ##########################
 # We need to use the Rust build image, because
@@ -119,9 +119,10 @@ COPY --from=vault /web-vault ./web-vault
 COPY --from=build /app/target/aarch64-unknown-linux-gnu/release/bitwarden_rs .
 COPY docker/healthcheck.sh /healthcheck.sh
+COPY docker/start.sh /start.sh
 HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]
 # Configures the startup!
 WORKDIR /
-CMD ["/bitwarden_rs"]
+CMD ["/start.sh"]

15
docker/start.sh Executable file
View File

@@ -0,0 +1,15 @@
#!/bin/sh

if [ -r /etc/bitwarden_rs.sh ]; then
    . /etc/bitwarden_rs.sh
fi

if [ -d /etc/bitwarden_rs.d ]; then
    for f in /etc/bitwarden_rs.d/*.sh; do
        if [ -r $f ]; then
            . $f
        fi
    done
fi

exec /bitwarden_rs "${@}"

20
hooks/README.md Normal file
View File

@@ -0,0 +1,20 @@
The hooks in this directory are used to create multi-arch images using Docker Hub automated builds.
Docker Hub hooks provide these predefined [environment variables](https://docs.docker.com/docker-hub/builds/advanced/#environment-variables-for-building-and-testing):
* `SOURCE_BRANCH`: the name of the branch or the tag that is currently being tested.
* `SOURCE_COMMIT`: the SHA1 hash of the commit being tested.
* `COMMIT_MSG`: the message from the commit being tested and built.
* `DOCKER_REPO`: the name of the Docker repository being built.
* `DOCKERFILE_PATH`: the dockerfile currently being built.
* `DOCKER_TAG`: the Docker repository tag being built.
* `IMAGE_NAME`: the name and tag of the Docker repository being built. (This variable is a combination of `DOCKER_REPO:DOCKER_TAG`.)
The current multi-arch image build relies on the original bitwarden_rs Dockerfiles, which use cross-compilation for architectures other than `amd64`, and don't yet support all arch/database/OS combinations. However, cross-compilation is much faster than QEMU-based builds (e.g., using `docker buildx`). This situation may need to be revisited at some point.
## References
* https://docs.docker.com/docker-hub/builds/advanced/
* https://docs.docker.com/engine/reference/commandline/manifest/
* https://www.docker.com/blog/multi-arch-build-and-images-the-simple-way/
* https://success.docker.com/article/how-do-i-authenticate-with-the-v2-api

30
hooks/arches.sh Normal file
View File

@@ -0,0 +1,30 @@
# The default Debian-based SQLite images support these arches.
#
# Other images (Alpine-based, or with other database backends) currently
# support only a subset of these.
arches=(
    amd64
    arm32v6
    arm32v7
    arm64v8
)

case "${DOCKER_REPO}" in
    *-mysql)
        db=mysql
        arches=(amd64)
        ;;
    *-postgresql)
        db=postgresql
        arches=(amd64)
        ;;
    *)
        db=sqlite
        ;;
esac

if [[ "${DOCKER_TAG}" == *alpine ]]; then
    # The Alpine build currently only works for amd64.
    os_suffix=.alpine
    arches=(amd64)
fi

14
hooks/build Executable file
View File

@@ -0,0 +1,14 @@
#!/bin/bash

echo ">>> Building images..."

source ./hooks/arches.sh

set -ex

for arch in "${arches[@]}"; do
    docker build \
        -t "${DOCKER_REPO}:${DOCKER_TAG}-${arch}" \
        -f docker/${arch}/${db}/Dockerfile${os_suffix} \
        .
done

102
hooks/push Executable file
View File

@@ -0,0 +1,102 @@
#!/bin/bash

echo ">>> Pushing images..."

export DOCKER_CLI_EXPERIMENTAL=enabled

declare -A annotations=(
    [amd64]="--os linux --arch amd64"
    [arm32v6]="--os linux --arch arm --variant v6"
    [arm32v7]="--os linux --arch arm --variant v7"
    [arm64v8]="--os linux --arch arm64 --variant v8"
)

source ./hooks/arches.sh

set -ex

declare -A images
for arch in ${arches[@]}; do
    images[$arch]="${DOCKER_REPO}:${DOCKER_TAG}-${arch}"
done

# Push the images that were just built; manifest list creation fails if the
# images (manifests) referenced don't already exist in the Docker registry.
for image in "${images[@]}"; do
    docker push "${image}"
done

manifest_lists=("${DOCKER_REPO}:${DOCKER_TAG}")

# If the Docker tag starts with a version number, assume the latest release is
# being pushed. Add an extra manifest (`latest` or `alpine`, as appropriate)
# to make it easier for users to track the latest release.
if [[ "${DOCKER_TAG}" =~ ^[0-9]+\.[0-9]+\.[0-9]+ ]]; then
    if [[ "${DOCKER_TAG}" == *alpine ]]; then
        manifest_lists+=(${DOCKER_REPO}:alpine)
    else
        manifest_lists+=(${DOCKER_REPO}:latest)
    fi
fi

for manifest_list in "${manifest_lists[@]}"; do
    # Create the (multi-arch) manifest list of arch-specific images.
    docker manifest create ${manifest_list} ${images[@]}

    # Make sure each image manifest is annotated with the correct arch info.
    # Docker does not auto-detect the arch of each cross-compiled image, so
    # everything would appear as `linux/amd64` otherwise.
    for arch in "${arches[@]}"; do
        docker manifest annotate ${annotations[$arch]} ${manifest_list} ${images[$arch]}
    done

    # Push the manifest list.
    docker manifest push --purge ${manifest_list}
done

# Avoid logging credentials and tokens.
set +ex

# Delete the arch-specific tags, if credentials for doing so are available.
# Note that `DOCKER_PASSWORD` must be the actual user password. Passing a JWT
# obtained using a personal access token results in a 403 error with
# {"detail": "access to the resource is forbidden with personal access token"}
if [[ -z "${DOCKER_USERNAME}" || -z "${DOCKER_PASSWORD}" ]]; then
    exit 0
fi

# Given a JSON input on stdin, extract the string value associated with the
# specified key. This avoids an extra dependency on a tool like `jq`.
extract() {
    local key="$1"
    # Extract "<key>":"<val>" (assumes key/val won't contain double quotes).
    # The colon may have whitespace on either side.
    grep -o "\"${key}\"[[:space:]]*:[[:space:]]*\"[^\"]\+\"" |
    # Extract just <val> by deleting the last '"', and then greedily deleting
    # everything up to '"'.
    sed -e 's/"$//' -e 's/.*"//'
}

echo ">>> Getting API token..."
jwt=$(curl -sS -X POST \
    -H "Content-Type: application/json" \
    -d "{\"username\":\"${DOCKER_USERNAME}\",\"password\": \"${DOCKER_PASSWORD}\"}" \
    "https://hub.docker.com/v2/users/login" |
    extract 'token')

# Strip the registry portion from `index.docker.io/user/repo`.
repo="${DOCKER_REPO#*/}"

for arch in ${arches[@]}; do
    # Don't delete the `arm32v6` tag; Docker can't seem to properly
    # auto-select that image on Armv6 platforms like Raspberry Pi 1 and Zero
    # (https://github.com/moby/moby/issues/41017).
    if [[ ${arch} == 'arm32v6' ]]; then
        continue
    fi
    tag="${DOCKER_TAG}-${arch}"
    echo ">>> Deleting '${repo}:${tag}'..."
    curl -sS -X DELETE \
        -H "Authorization: Bearer ${jwt}" \
        "https://hub.docker.com/v2/repositories/${repo}/tags/${tag}/"
done

View File

@@ -0,0 +1,2 @@
ALTER TABLE users_collections
ADD COLUMN hide_passwords BOOLEAN NOT NULL DEFAULT FALSE;

View File

@@ -0,0 +1,2 @@
ALTER TABLE users_collections
ADD COLUMN hide_passwords BOOLEAN NOT NULL DEFAULT FALSE;

View File

@@ -0,0 +1,2 @@
ALTER TABLE users_collections
ADD COLUMN hide_passwords BOOLEAN NOT NULL DEFAULT 0; -- FALSE
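For context, a hypothetical view of how the new column might surface in a Diesel `table!` definition once these migrations run; the repo's actual schema.rs (column types, key layout) may differ.

```rust
use diesel::table;

// Assumed shape of the users_collections mapping table after the
// hide_passwords migration; illustrative only.
table! {
    users_collections (user_uuid, collection_uuid) {
        user_uuid -> Text,
        collection_uuid -> Text,
        read_only -> Bool,
        hide_passwords -> Bool,
    }
}
```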

View File

@@ -1 +1 @@
-nightly-2020-05-31
+nightly-2020-07-11

View File

@@ -1,22 +1,26 @@
 use once_cell::sync::Lazy;
-use serde_json::Value;
 use serde::de::DeserializeOwned;
+use serde_json::Value;
 use std::process::Command;
-use rocket::http::{Cookie, Cookies, SameSite};
-use rocket::request::{self, FlashMessage, Form, FromRequest, Request};
-use rocket::response::{content::Html, Flash, Redirect};
-use rocket::{Outcome, Route};
+use rocket::{
+    http::{Cookie, Cookies, SameSite},
+    request::{self, FlashMessage, Form, FromRequest, Request, Outcome},
+    response::{content::Html, Flash, Redirect},
+    Route,
+};
 use rocket_contrib::json::Json;
-use crate::api::{ApiResult, EmptyResult, JsonResult};
-use crate::auth::{decode_admin, encode_jwt, generate_admin_claims, ClientIp};
-use crate::config::ConfigBuilder;
-use crate::db::{backup_database, models::*, DbConn};
-use crate::error::Error;
-use crate::mail;
-use crate::util::get_display_size;
-use crate::CONFIG;
+use crate::{
+    api::{ApiResult, EmptyResult, JsonResult},
+    auth::{decode_admin, encode_jwt, generate_admin_claims, ClientIp},
+    config::ConfigBuilder,
+    db::{backup_database, models::*, DbConn},
+    error::{Error, MapResult},
+    mail,
+    util::get_display_size,
+    CONFIG,
+};
 pub fn routes() -> Vec<Route> {
     if !CONFIG.disable_admin_token() && !CONFIG.is_admin_token_set() {
@@ -270,21 +274,13 @@ fn users_overview(_token: AdminToken, conn: DbConn) -> ApiResult<Html<String>> {
 #[post("/users/<uuid>/delete")]
 fn delete_user(uuid: String, _token: AdminToken, conn: DbConn) -> EmptyResult {
-    let user = match User::find_by_uuid(&uuid, &conn) {
-        Some(user) => user,
-        None => err!("User doesn't exist"),
-    };
+    let user = User::find_by_uuid(&uuid, &conn).map_res("User doesn't exist")?;
     user.delete(&conn)
 }
 #[post("/users/<uuid>/deauth")]
 fn deauth_user(uuid: String, _token: AdminToken, conn: DbConn) -> EmptyResult {
-    let mut user = match User::find_by_uuid(&uuid, &conn) {
-        Some(user) => user,
-        None => err!("User doesn't exist"),
-    };
+    let mut user = User::find_by_uuid(&uuid, &conn).map_res("User doesn't exist")?;
     Device::delete_all_by_user(&user.uuid, &conn)?;
     user.reset_security_stamp();
@@ -293,11 +289,7 @@ fn deauth_user(uuid: String, _token: AdminToken, conn: DbConn) -> EmptyResult {
 #[post("/users/<uuid>/remove-2fa")]
 fn remove_2fa(uuid: String, _token: AdminToken, conn: DbConn) -> EmptyResult {
-    let mut user = match User::find_by_uuid(&uuid, &conn) {
-        Some(user) => user,
-        None => err!("User doesn't exist"),
-    };
+    let mut user = User::find_by_uuid(&uuid, &conn).map_res("User doesn't exist")?;
     TwoFactor::delete_all_by_user(&user.uuid, &conn)?;
     user.totp_recover = None;
     user.save(&conn)
@@ -340,7 +332,7 @@ struct GitCommit {
 }
 fn get_github_api<T: DeserializeOwned>(url: &str) -> Result<T, Error> {
-    use reqwest::{header::USER_AGENT, blocking::Client};
+    use reqwest::{blocking::Client, header::USER_AGENT};
     use std::time::Duration;
     let github_api = Client::builder().build()?;
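The `map_res` helper pulled in via `error::{Error, MapResult}` is what collapses the `match`-on-`Option` blocks above into one-liners. A minimal sketch of such an extension trait, assuming a simple string-backed error type (the repo's real `Error` and `MapResult` in src/error.rs may differ):

```rust
// Assumed, simplified error type; stands in for the repo's crate::error::Error.
#[derive(Debug)]
pub struct Error(String);

pub trait MapResult<T> {
    fn map_res(self, msg: &str) -> Result<T, Error>;
}

impl<T> MapResult<T> for Option<T> {
    // None becomes a uniform error, so a call site shrinks to:
    // let user = User::find_by_uuid(&uuid, &conn).map_res("User doesn't exist")?;
    fn map_res(self, msg: &str) -> Result<T, Error> {
        self.ok_or_else(|| Error(msg.into()))
    }
}
```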

View File

@@ -1,19 +1,15 @@
 use chrono::Utc;
 use rocket_contrib::json::Json;
-use crate::db::models::*;
-use crate::db::DbConn;
-use crate::api::{EmptyResult, JsonResult, JsonUpcase, Notify, NumberOrString, PasswordData, UpdateType};
-use crate::auth::{decode_delete, decode_invite, decode_verify_email, Headers};
-use crate::crypto;
-use crate::mail;
-use crate::CONFIG;
-use rocket::Route;
+use crate::{
+    api::{EmptyResult, JsonResult, JsonUpcase, Notify, NumberOrString, PasswordData, UpdateType},
+    auth::{decode_delete, decode_invite, decode_verify_email, Headers},
+    crypto,
+    db::{models::*, DbConn},
+    mail, CONFIG,
+};
-pub fn routes() -> Vec<Route> {
+pub fn routes() -> Vec<rocket::Route> {
     routes![
         register,
         profile,

View File

@@ -1,26 +1,20 @@
 use std::collections::{HashMap, HashSet};
 use std::path::Path;

-use rocket::http::ContentType;
-use rocket::{request::Form, Data, Route};
+use rocket::{http::ContentType, request::Form, Data, Route};
 use rocket_contrib::json::Json;
 use serde_json::Value;

-use multipart::server::save::SavedData;
-use multipart::server::{Multipart, SaveResult};
-
 use data_encoding::HEXLOWER;
+use multipart::server::{save::SavedData, Multipart, SaveResult};

-use crate::db::models::*;
-use crate::db::DbConn;
-
-use crate::crypto;
-
-use crate::api::{self, EmptyResult, JsonResult, JsonUpcase, Notify, PasswordData, UpdateType};
-use crate::auth::Headers;
-use crate::CONFIG;
+use crate::{
+    api::{self, EmptyResult, JsonResult, JsonUpcase, Notify, PasswordData, UpdateType},
+    auth::Headers,
+    crypto,
+    db::{models::*, DbConn},
+    CONFIG,
+};

 pub fn routes() -> Vec<Route> {
     routes![

@@ -617,9 +611,8 @@ fn share_cipher_by_uuid(
     match data.Cipher.OrganizationId.clone() {
         // If we don't get an organization ID, we don't do anything
         // No error because this is used when using the Clone functionality
-        None => {},
+        None => {}
         Some(organization_uuid) => {
             for uuid in &data.CollectionIds {
                 match Collection::find_by_uuid_and_org(uuid, &organization_uuid, &conn) {
                     None => err!("Invalid collection ID provided"),

View File

@@ -1,15 +1,13 @@
 use rocket_contrib::json::Json;
 use serde_json::Value;

-use crate::db::models::*;
-use crate::db::DbConn;
-
-use crate::api::{EmptyResult, JsonResult, JsonUpcase, Notify, UpdateType};
-use crate::auth::Headers;
-
-use rocket::Route;
+use crate::{
+    api::{EmptyResult, JsonResult, JsonUpcase, Notify, UpdateType},
+    auth::Headers,
+    db::{models::*, DbConn},
+};

-pub fn routes() -> Vec<Route> {
+pub fn routes() -> Vec<rocket::Route> {
     routes![
         get_folders,
         get_folder,

View File

@@ -29,14 +29,15 @@ pub fn routes() -> Vec<Route> {
 //
 // Move this somewhere else
 //

 use rocket::Route;
 use rocket_contrib::json::Json;
 use serde_json::Value;

-use crate::api::{EmptyResult, JsonResult, JsonUpcase};
-use crate::auth::Headers;
-use crate::db::DbConn;
-use crate::error::Error;
+use crate::{
+    api::{EmptyResult, JsonResult, JsonUpcase},
+    auth::Headers,
+    db::DbConn,
+    error::Error,
+};

 #[put("/devices/identifier/<uuid>/clear-token")]
 fn clear_device_token(uuid: String) -> EmptyResult {

@@ -146,7 +147,7 @@ fn hibp_breach(username: String) -> JsonResult {
         username
     );

-    use reqwest::{header::USER_AGENT, blocking::Client};
+    use reqwest::{blocking::Client, header::USER_AGENT};

     if let Some(api_key) = crate::CONFIG.hibp_api_key() {
         let hibp_client = Client::builder().build()?;

View File

@@ -1,17 +1,14 @@
-use rocket::request::Form;
-use rocket::Route;
+use num_traits::FromPrimitive;
+use rocket::{request::Form, Route};
 use rocket_contrib::json::Json;
 use serde_json::Value;

-use num_traits::FromPrimitive;
-
-use crate::api::{
-    EmptyResult, JsonResult, JsonUpcase, JsonUpcaseVec, Notify, NumberOrString, PasswordData, UpdateType,
+use crate::{
+    api::{EmptyResult, JsonResult, JsonUpcase, JsonUpcaseVec, Notify, NumberOrString, PasswordData, UpdateType},
+    auth::{decode_invite, AdminHeaders, Headers, OwnerHeaders},
+    db::{models::*, DbConn},
+    mail, CONFIG,
 };
-use crate::auth::{decode_invite, AdminHeaders, Headers, OwnerHeaders};
-use crate::db::models::*;
-use crate::db::DbConn;
-use crate::mail;
-use crate::CONFIG;

 pub fn routes() -> Vec<Route> {
     routes![

@@ -374,7 +371,7 @@ fn get_collection_users(org_id: String, coll_id: String, _headers: AdminHeaders,
         .map(|col_user| {
             UserOrganization::find_by_user_and_org(&col_user.user_uuid, &org_id, &conn)
                 .unwrap()
-                .to_json_read_only(col_user.read_only)
+                .to_json_user_access_restrictions(&col_user)
         })
         .collect();

@@ -408,7 +405,9 @@ fn put_collection_users(
             continue;
         }

-        CollectionUser::save(&user.user_uuid, &coll_id, d.ReadOnly, &conn)?;
+        CollectionUser::save(&user.user_uuid, &coll_id,
+                             d.ReadOnly, d.HidePasswords,
+                             &conn)?;
     }

     Ok(())

@@ -452,6 +451,7 @@ fn get_org_users(org_id: String, _headers: AdminHeaders, conn: DbConn) -> JsonRe
 struct CollectionData {
     Id: String,
     ReadOnly: bool,
+    HidePasswords: bool,
 }

 #[derive(Deserialize)]

@@ -523,7 +523,9 @@ fn send_invite(org_id: String, data: JsonUpcase<InviteData>, headers: AdminHeade
             match Collection::find_by_uuid_and_org(&col.Id, &org_id, &conn) {
                 None => err!("Collection not found in Organization"),
                 Some(collection) => {
-                    CollectionUser::save(&user.uuid, &collection.uuid, col.ReadOnly, &conn)?;
+                    CollectionUser::save(&user.uuid, &collection.uuid,
+                                         col.ReadOnly, col.HidePasswords,
+                                         &conn)?;
                 }
             }
         }

@@ -778,7 +780,9 @@ fn edit_user(
             match Collection::find_by_uuid_and_org(&col.Id, &org_id, &conn) {
                 None => err!("Collection not found in Organization"),
                 Some(collection) => {
-                    CollectionUser::save(&user_to_edit.user_uuid, &collection.uuid, col.ReadOnly, &conn)?;
+                    CollectionUser::save(&user_to_edit.user_uuid, &collection.uuid,
+                                         col.ReadOnly, col.HidePasswords,
+                                         &conn)?;
                 }
             }
         }

View File

@@ -2,13 +2,16 @@ use data_encoding::BASE32;
 use rocket::Route;
 use rocket_contrib::json::Json;

-use crate::api::core::two_factor::_generate_recover_code;
-use crate::api::{EmptyResult, JsonResult, JsonUpcase, NumberOrString, PasswordData};
-use crate::auth::{ClientIp, Headers};
-use crate::crypto;
-use crate::db::{
-    models::{TwoFactor, TwoFactorType},
-    DbConn,
+use crate::{
+    api::{
+        core::two_factor::_generate_recover_code, EmptyResult, JsonResult, JsonUpcase, NumberOrString, PasswordData,
+    },
+    auth::{ClientIp, Headers},
+    crypto,
+    db::{
+        models::{TwoFactor, TwoFactorType},
+        DbConn,
+    },
 };

 pub use crate::config::CONFIG;

View File

@@ -3,16 +3,17 @@ use data_encoding::BASE64;
 use rocket::Route;
 use rocket_contrib::json::Json;

-use crate::api::core::two_factor::_generate_recover_code;
-use crate::api::{ApiResult, EmptyResult, JsonResult, JsonUpcase, PasswordData};
-use crate::auth::Headers;
-use crate::crypto;
-use crate::db::{
-    models::{TwoFactor, TwoFactorType, User},
-    DbConn,
+use crate::{
+    api::{core::two_factor::_generate_recover_code, ApiResult, EmptyResult, JsonResult, JsonUpcase, PasswordData},
+    auth::Headers,
+    crypto,
+    db::{
+        models::{TwoFactor, TwoFactorType, User},
+        DbConn,
+    },
+    error::MapResult,
+    CONFIG,
 };
-use crate::error::MapResult;
-use crate::CONFIG;

 pub fn routes() -> Vec<Route> {
     routes![get_duo, activate_duo, activate_duo_put,]

@@ -186,7 +187,7 @@ fn activate_duo_put(data: JsonUpcase<EnableDuoData>, headers: Headers, conn: DbC
 fn duo_api_request(method: &str, path: &str, params: &str, data: &DuoData) -> EmptyResult {
     const AGENT: &str = "bitwarden_rs:Duo/1.0 (Rust)";

-    use reqwest::{header::*, Method, blocking::Client};
+    use reqwest::{blocking::Client, header::*, Method};
     use std::str::FromStr;

     // https://duo.com/docs/authapi#api-details

View File

@@ -1,20 +1,18 @@
+use chrono::{Duration, NaiveDateTime, Utc};
 use rocket::Route;
 use rocket_contrib::json::Json;

-use crate::api::core::two_factor::_generate_recover_code;
-use crate::api::{EmptyResult, JsonResult, JsonUpcase, PasswordData};
-use crate::auth::Headers;
-use crate::crypto;
-use crate::db::{
-    models::{TwoFactor, TwoFactorType},
-    DbConn,
+use crate::{
+    api::{core::two_factor::_generate_recover_code, EmptyResult, JsonResult, JsonUpcase, PasswordData},
+    auth::Headers,
+    crypto,
+    db::{
+        models::{TwoFactor, TwoFactorType},
+        DbConn,
+    },
+    error::{Error, MapResult},
+    mail, CONFIG,
 };
-use crate::error::Error;
-use crate::mail;
-use crate::CONFIG;
-use chrono::{Duration, NaiveDateTime, Utc};
-use std::ops::Add;

 pub fn routes() -> Vec<Route> {
     routes![get_email, send_email_login, send_email, email,]

@@ -58,7 +56,7 @@ fn send_email_login(data: JsonUpcase<SendEmailLoginData>, conn: DbConn) -> Empty
 /// Generate the token, save the data for later verification and send email to user
 pub fn send_token(user_uuid: &str, conn: &DbConn) -> EmptyResult {
     let type_ = TwoFactorType::Email as i32;
-    let mut twofactor = TwoFactor::find_by_user_and_type(user_uuid, type_, &conn)?;
+    let mut twofactor = TwoFactor::find_by_user_and_type(user_uuid, type_, &conn).map_res("Two factor not found")?;

     let generated_token = crypto::generate_token(CONFIG.email_token_size())?;

@@ -67,7 +65,7 @@ pub fn send_token(user_uuid: &str, conn: &DbConn) -> EmptyResult {
     twofactor.data = twofactor_data.to_json();
     twofactor.save(&conn)?;

-    mail::send_token(&twofactor_data.email, &twofactor_data.last_token?)?;
+    mail::send_token(&twofactor_data.email, &twofactor_data.last_token.map_res("Token is empty")?)?;

     Ok(())
 }

@@ -134,7 +132,7 @@ fn send_email(data: JsonUpcase<SendEmailData>, headers: Headers, conn: DbConn) -
     );

     twofactor.save(&conn)?;

-    mail::send_token(&twofactor_data.email, &twofactor_data.last_token?)?;
+    mail::send_token(&twofactor_data.email, &twofactor_data.last_token.map_res("Token is empty")?)?;

     Ok(())
 }

@@ -158,7 +156,7 @@ fn email(data: JsonUpcase<EmailData>, headers: Headers, conn: DbConn) -> JsonRes
     }

     let type_ = TwoFactorType::EmailVerificationChallenge as i32;
-    let mut twofactor = TwoFactor::find_by_user_and_type(&user.uuid, type_, &conn)?;
+    let mut twofactor = TwoFactor::find_by_user_and_type(&user.uuid, type_, &conn).map_res("Two factor not found")?;

     let mut email_data = EmailTokenData::from_json(&twofactor.data)?;

@@ -188,7 +186,7 @@ fn email(data: JsonUpcase<EmailData>, headers: Headers, conn: DbConn) -> JsonRes
 /// Validate the email code when used as TwoFactor token mechanism
 pub fn validate_email_code_str(user_uuid: &str, token: &str, data: &str, conn: &DbConn) -> EmptyResult {
     let mut email_data = EmailTokenData::from_json(&data)?;
-    let mut twofactor = TwoFactor::find_by_user_and_type(&user_uuid, TwoFactorType::Email as i32, &conn)?;
+    let mut twofactor = TwoFactor::find_by_user_and_type(&user_uuid, TwoFactorType::Email as i32, &conn).map_res("Two factor not found")?;
     let issued_token = match &email_data.last_token {
         Some(t) => t,
         _ => err!("No token available"),

@@ -211,7 +209,7 @@ pub fn validate_email_code_str(user_uuid: &str, token: &str, data: &str, conn: &
     let date = NaiveDateTime::from_timestamp(email_data.token_sent, 0);
     let max_time = CONFIG.email_expiration_time() as i64;
-    if date.add(Duration::seconds(max_time)) < Utc::now().naive_utc() {
+    if date + Duration::seconds(max_time) < Utc::now().naive_utc() {
         err!("Token has expired")
     }
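Note: the last hunk swaps `date.add(..)` for the plain `+` operator; both resolve to the `Add` impl chrono provides for `NaiveDateTime`, which is why `use std::ops::Add;` could be dropped from the imports. A hedged standalone illustration (timestamp and window values are made up):

use chrono::{Duration, NaiveDateTime, Utc};

fn main() {
    let token_sent: i64 = 1_595_000_000; // hypothetical Unix timestamp of the stored token
    let max_time: i64 = 600;             // e.g., a 10-minute expiration window

    let date = NaiveDateTime::from_timestamp(token_sent, 0);
    // `date + Duration::seconds(..)` uses chrono's built-in Add impl,
    // so no explicit `use std::ops::Add;` is required.
    if date + Duration::seconds(max_time) < Utc::now().naive_utc() {
        println!("Token has expired");
    }
}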

View File

@@ -3,12 +3,14 @@ use rocket::Route;
 use rocket_contrib::json::Json;
 use serde_json::Value;

-use crate::api::{JsonResult, JsonUpcase, NumberOrString, PasswordData};
-use crate::auth::Headers;
-use crate::crypto;
-use crate::db::{
-    models::{TwoFactor, User},
-    DbConn,
+use crate::{
+    api::{JsonResult, JsonUpcase, NumberOrString, PasswordData},
+    auth::Headers,
+    crypto,
+    db::{
+        models::{TwoFactor, User},
+        DbConn,
+    },
 };

 pub mod authenticator;

View File

@@ -2,19 +2,25 @@ use once_cell::sync::Lazy;
 use rocket::Route;
 use rocket_contrib::json::Json;
 use serde_json::Value;
-use u2f::messages::{RegisterResponse, SignResponse, U2fSignRequest};
-use u2f::protocol::{Challenge, U2f};
-use u2f::register::Registration;
+use u2f::{
+    messages::{RegisterResponse, SignResponse, U2fSignRequest},
+    protocol::{Challenge, U2f},
+    register::Registration,
+};

-use crate::api::core::two_factor::_generate_recover_code;
-use crate::api::{ApiResult, EmptyResult, JsonResult, JsonUpcase, NumberOrString, PasswordData};
-use crate::auth::Headers;
-use crate::db::{
-    models::{TwoFactor, TwoFactorType},
-    DbConn,
+use crate::{
+    api::{
+        core::two_factor::_generate_recover_code, ApiResult, EmptyResult, JsonResult, JsonUpcase, NumberOrString,
+        PasswordData,
+    },
+    auth::Headers,
+    db::{
+        models::{TwoFactor, TwoFactorType},
+        DbConn,
+    },
+    error::Error,
+    CONFIG,
 };
-use crate::error::Error;
-use crate::CONFIG;

 const U2F_VERSION: &str = "U2F_V2";

View File

@@ -1,18 +1,18 @@
 use rocket::Route;
 use rocket_contrib::json::Json;
 use serde_json::Value;
-use yubico::config::Config;
-use yubico::verify;
+use yubico::{config::Config, verify};

-use crate::api::core::two_factor::_generate_recover_code;
-use crate::api::{EmptyResult, JsonResult, JsonUpcase, PasswordData};
-use crate::auth::Headers;
-use crate::db::{
-    models::{TwoFactor, TwoFactorType},
-    DbConn,
+use crate::{
+    api::{core::two_factor::_generate_recover_code, EmptyResult, JsonResult, JsonUpcase, PasswordData},
+    auth::Headers,
+    db::{
+        models::{TwoFactor, TwoFactorType},
+        DbConn,
+    },
+    error::{Error, MapResult},
+    CONFIG,
 };
-use crate::error::{Error, MapResult};
-use crate::CONFIG;

 pub fn routes() -> Vec<Route> {
     routes![generate_yubikey, activate_yubikey, activate_yubikey_put,]

View File

@@ -1,23 +1,17 @@
+use std::{
+    fs::{create_dir_all, remove_file, symlink_metadata, File},
+    io::prelude::*,
+    net::{IpAddr, ToSocketAddrs},
+    time::{Duration, SystemTime},
+};
+
 use once_cell::sync::Lazy;
-use std::fs::{create_dir_all, remove_file, symlink_metadata, File};
-use std::io::prelude::*;
-use std::net::ToSocketAddrs;
-use std::time::{Duration, SystemTime};
-
-use rocket::http::ContentType;
-use rocket::response::Content;
-use rocket::Route;
-
-use reqwest::{Url, header::HeaderMap, blocking::Client, blocking::Response};
-
-use rocket::http::Cookie;
-
 use regex::Regex;
+use reqwest::{blocking::Client, blocking::Response, header::HeaderMap, Url};
+use rocket::{http::ContentType, http::Cookie, response::Content, Route};
 use soup::prelude::*;

-use crate::error::Error;
-use crate::CONFIG;
-use crate::util::Cached;
+use crate::{error::Error, util::Cached, CONFIG};

 pub fn routes() -> Vec<Route> {
     routes![icon]

@@ -36,6 +30,11 @@ static CLIENT: Lazy<Client> = Lazy::new(|| {
         .unwrap()
 });

+static ICON_REL_REGEX: Lazy<Regex> = Lazy::new(|| Regex::new(r"icon$|apple.*icon").unwrap());
+static ICON_HREF_REGEX: Lazy<Regex> =
+    Lazy::new(|| Regex::new(r"(?i)\w+\.(jpg|jpeg|png|ico)(\?.*)?$|^data:image.*base64").unwrap());
+static ICON_SIZE_REGEX: Lazy<Regex> = Lazy::new(|| Regex::new(r"(?x)(\d+)\D*(\d+)").unwrap());
+
 fn is_valid_domain(domain: &str) -> bool {
     // Don't allow empty or too big domains or path traversal
     if domain.is_empty() || domain.len() > 255 || domain.contains("..") {

@@ -64,13 +63,111 @@ fn icon(domain: String) -> Cached<Content<Vec<u8>>> {
     Cached::long(Content(icon_type, get_icon(&domain)))
 }

+/// TODO: This is extracted from IpAddr::is_global, which is unstable:
+/// https://doc.rust-lang.org/nightly/std/net/enum.IpAddr.html#method.is_global
+/// Remove once https://github.com/rust-lang/rust/issues/27709 is merged
+#[cfg(not(feature = "unstable"))]
+fn is_global(ip: IpAddr) -> bool {
+    match ip {
+        IpAddr::V4(ip) => {
+            // check if this address is 192.0.0.9 or 192.0.0.10. These addresses are the only two
+            // globally routable addresses in the 192.0.0.0/24 range.
+            if u32::from(ip) == 0xc0000009 || u32::from(ip) == 0xc000000a {
+                return true;
+            }
+            !ip.is_private()
+                && !ip.is_loopback()
+                && !ip.is_link_local()
+                && !ip.is_broadcast()
+                && !ip.is_documentation()
+                && !(ip.octets()[0] == 100 && (ip.octets()[1] & 0b1100_0000 == 0b0100_0000))
+                && !(ip.octets()[0] == 192 && ip.octets()[1] == 0 && ip.octets()[2] == 0)
+                && !(ip.octets()[0] & 240 == 240 && !ip.is_broadcast())
+                && !(ip.octets()[0] == 198 && (ip.octets()[1] & 0xfe) == 18)
+                // Make sure the address is not in 0.0.0.0/8
+                && ip.octets()[0] != 0
+        }
+        IpAddr::V6(ip) => {
+            if ip.is_multicast() && ip.segments()[0] & 0x000f == 14 {
+                true
+            } else {
+                !ip.is_multicast()
+                    && !ip.is_loopback()
+                    && !((ip.segments()[0] & 0xffc0) == 0xfe80)
+                    && !((ip.segments()[0] & 0xfe00) == 0xfc00)
+                    && !ip.is_unspecified()
+                    && !((ip.segments()[0] == 0x2001) && (ip.segments()[1] == 0xdb8))
+            }
+        }
+    }
+}
+
+#[cfg(feature = "unstable")]
+fn is_global(ip: IpAddr) -> bool {
+    ip.is_global()
+}
+
+/// These are some tests to check that the implementations match
+/// The IPv4 can be all checked in 5 mins or so and they are correct as of nightly 2020-07-11
+/// The IPV6 can't be checked in a reasonable time, so we check about ten billion random ones, so far correct
+/// Note that the is_global implementation is subject to change as new IP RFCs are created
+///
+/// To run while showing progress output:
+/// cargo test --features sqlite,unstable -- --nocapture --ignored
+#[cfg(test)]
+#[cfg(feature = "unstable")]
+mod tests {
+    use super::*;
+
+    #[test]
+    #[ignore]
+    fn test_ipv4_global() {
+        for a in 0..u8::MAX {
+            println!("Iter: {}/255", a);
+            for b in 0..u8::MAX {
+                for c in 0..u8::MAX {
+                    for d in 0..u8::MAX {
+                        let ip = IpAddr::V4(std::net::Ipv4Addr::new(a, b, c, d));
+                        assert_eq!(ip.is_global(), is_global(ip))
+                    }
+                }
+            }
+        }
+    }
+
+    #[test]
+    #[ignore]
+    fn test_ipv6_global() {
+        use ring::rand::{SecureRandom, SystemRandom};
+        let mut v = [0u8; 16];
+        let rand = SystemRandom::new();
+        for i in 0..1_000 {
+            println!("Iter: {}/1_000", i);
+            for _ in 0..10_000_000 {
+                rand.fill(&mut v).expect("Error generating random values");
+                let ip = IpAddr::V6(std::net::Ipv6Addr::new(
+                    (v[14] as u16) << 8 | v[15] as u16,
+                    (v[12] as u16) << 8 | v[13] as u16,
+                    (v[10] as u16) << 8 | v[11] as u16,
+                    (v[8] as u16) << 8 | v[9] as u16,
+                    (v[6] as u16) << 8 | v[7] as u16,
+                    (v[4] as u16) << 8 | v[5] as u16,
+                    (v[2] as u16) << 8 | v[3] as u16,
+                    (v[0] as u16) << 8 | v[1] as u16,
+                ));
+                assert_eq!(ip.is_global(), is_global(ip))
+            }
+        }
+    }
+}
+
 fn check_icon_domain_is_blacklisted(domain: &str) -> bool {
     let mut is_blacklisted = CONFIG.icon_blacklist_non_global_ips()
         && (domain, 0)
             .to_socket_addrs()
             .map(|x| {
                 for ip_port in x {
-                    if !ip_port.ip().is_global() {
+                    if !is_global(ip_port.ip()) {
                         warn!("IP {} for domain '{}' is not a global IP!", ip_port.ip(), domain);
                         return true;
                     }

@@ -241,8 +338,8 @@ fn get_icon_url(domain: &str) -> Result<(Vec<Icon>, String), Error> {
     // Search for and filter
     let favicons = soup
         .tag("link")
-        .attr("rel", Regex::new(r"icon$|apple.*icon")?) // Only use icon rels
-        .attr("href", Regex::new(r"(?i)\w+\.(jpg|jpeg|png|ico)(\?.*)?$|^data:image.*base64")?) // Only allow specific extensions
+        .attr("rel", ICON_REL_REGEX.clone()) // Only use icon rels
+        .attr("href", ICON_HREF_REGEX.clone()) // Only allow specific extensions
        .find_all();

     // Loop through all the found icons and determine it's priority

@@ -354,7 +451,7 @@ fn parse_sizes(sizes: Option<String>) -> (u16, u16) {
     let mut height: u16 = 0;
     if let Some(sizes) = sizes {
-        match Regex::new(r"(?x)(\d+)\D*(\d+)").unwrap().captures(sizes.trim()) {
+        match ICON_SIZE_REGEX.captures(sizes.trim()) {
             None => {}
             Some(dimensions) => {
                 if dimensions.len() >= 3 {
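Note: the hunks above hoist the three regexes into `once_cell::sync::Lazy` statics, so each pattern is compiled once on first use instead of on every call to `get_icon_url()` or `parse_sizes()`. A minimal standalone sketch of the pattern, reusing the size regex from the diff:

use once_cell::sync::Lazy;
use regex::Regex;

// Compiled once on first access, then shared by every caller.
static ICON_SIZE_REGEX: Lazy<Regex> = Lazy::new(|| Regex::new(r"(?x)(\d+)\D*(\d+)").unwrap());

fn main() {
    // Repeated calls reuse the same compiled regex instead of rebuilding it.
    for sizes in &["32x32", "16 16"] {
        if let Some(caps) = ICON_SIZE_REGEX.captures(sizes) {
            println!("width={} height={}", &caps[1], &caps[2]);
        }
    }
}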

View File

@@ -1,19 +1,22 @@
-use chrono::Utc;
+use chrono::Local;
 use num_traits::FromPrimitive;
-use rocket::request::{Form, FormItems, FromForm};
-use rocket::Route;
+use rocket::{
+    request::{Form, FormItems, FromForm},
+    Route,
+};
 use rocket_contrib::json::Json;
 use serde_json::Value;

-use crate::api::core::two_factor::email::EmailTokenData;
-use crate::api::core::two_factor::{duo, email, yubikey};
-use crate::api::{ApiResult, EmptyResult, JsonResult};
-use crate::auth::ClientIp;
-use crate::db::models::*;
-use crate::db::DbConn;
-use crate::mail;
-use crate::util;
-use crate::CONFIG;
+use crate::{
+    api::{
+        core::two_factor::{duo, email, email::EmailTokenData, yubikey},
+        ApiResult, EmptyResult, JsonResult,
+    },
+    auth::ClientIp,
+    db::{models::*, DbConn},
+    error::MapResult,
+    mail, util, CONFIG,
+};

 pub fn routes() -> Vec<Route> {
     routes![login]

@@ -49,10 +52,7 @@ fn _refresh_login(data: ConnectData, conn: DbConn) -> JsonResult {
     let token = data.refresh_token.unwrap();

     // Get device by refresh token
-    let mut device = match Device::find_by_refresh_token(&token, &conn) {
-        Some(device) => device,
-        None => err!("Invalid refresh token"),
-    };
+    let mut device = Device::find_by_refresh_token(&token, &conn).map_res("Invalid refresh token")?;

     // COMMON
     let user = User::find_by_uuid(&device.user_uuid, &conn).unwrap();

@@ -97,8 +97,10 @@ fn _password_login(data: ConnectData, conn: DbConn, ip: &ClientIp) -> JsonResult
         )
     }

+    let now = Local::now();
+
     if user.verified_at.is_none() && CONFIG.mail_enabled() && CONFIG.signups_verify() {
-        let now = Utc::now().naive_utc();
+        let now = now.naive_utc();
         if user.last_verifying_at.is_none() || now.signed_duration_since(user.last_verifying_at.unwrap()).num_seconds() > CONFIG.signups_verify_resend_time() as i64 {
             let resend_limit = CONFIG.signups_verify_resend_limit() as i32;
             if resend_limit == 0 || user.login_verify_count < resend_limit {

@@ -130,7 +132,7 @@ fn _password_login(data: ConnectData, conn: DbConn, ip: &ClientIp) -> JsonResult
     let twofactor_token = twofactor_auth(&user.uuid, &data, &mut device, &ip, &conn)?;

     if CONFIG.mail_enabled() && new_device {
-        if let Err(e) = mail::send_new_device_logged_in(&user.email, &ip.ip.to_string(), &device.updated_at, &device.name) {
+        if let Err(e) = mail::send_new_device_logged_in(&user.email, &ip.ip.to_string(), &now, &device.name) {
            error!("Error sending new device email: {:#?}", e);

             if CONFIG.require_device_email() {

@@ -252,10 +254,7 @@ fn twofactor_auth(
 }

 fn _selected_data(tf: Option<TwoFactor>) -> ApiResult<String> {
-    match tf {
-        Some(tf) => Ok(tf.data),
-        None => err!("Two factor doesn't exist"),
-    }
+    tf.map(|t| t.data).map_res("Two factor doesn't exist")
 }

 fn _json_err_twofactor(providers: &[i32], user_uuid: &str, conn: &DbConn) -> ApiResult<Value> {

View File

@@ -5,23 +5,25 @@ mod identity;
 mod notifications;
 mod web;

-pub use self::admin::routes as admin_routes;
-pub use self::core::routes as core_routes;
-pub use self::icons::routes as icons_routes;
-pub use self::identity::routes as identity_routes;
-pub use self::notifications::routes as notifications_routes;
-pub use self::notifications::{start_notification_server, Notify, UpdateType};
-pub use self::web::routes as web_routes;
-
 use rocket_contrib::json::Json;
 use serde_json::Value;

+pub use crate::api::{
+    admin::routes as admin_routes,
+    core::routes as core_routes,
+    icons::routes as icons_routes,
+    identity::routes as identity_routes,
+    notifications::routes as notifications_routes,
+    notifications::{start_notification_server, Notify, UpdateType},
+    web::routes as web_routes,
+};
+use crate::util;
+
 // Type aliases for API methods results
 type ApiResult<T> = Result<T, crate::error::Error>;
 pub type JsonResult = ApiResult<Json<Value>>;
 pub type EmptyResult = ApiResult<()>;

-use crate::util;
 type JsonUpcase<T> = Json<util::UpCase<T>>;
 type JsonUpcaseVec<T> = Json<Vec<util::UpCase<T>>>;

View File

@@ -4,11 +4,12 @@ use rocket::Route;
 use rocket_contrib::json::Json;
 use serde_json::Value as JsonValue;

-use crate::api::{EmptyResult, JsonResult};
-use crate::auth::Headers;
-use crate::db::DbConn;
-
-use crate::{Error, CONFIG};
+use crate::{
+    api::{EmptyResult, JsonResult},
+    auth::Headers,
+    db::DbConn,
+    Error, CONFIG,
+};

 pub fn routes() -> Vec<Route> {
     routes![negotiate, websockets_err]

View File

@@ -1,15 +1,10 @@
 use std::path::{Path, PathBuf};

-use rocket::http::ContentType;
-use rocket::response::content::Content;
-use rocket::response::NamedFile;
-use rocket::Route;
+use rocket::{http::ContentType, response::content::Content, response::NamedFile, Route};
 use rocket_contrib::json::Json;
 use serde_json::Value;

-use crate::error::Error;
-use crate::util::Cached;
-use crate::CONFIG;
+use crate::{error::Error, util::Cached, CONFIG};

 pub fn routes() -> Vec<Route> {
     // If addding more routes here, consider also adding them to

View File

@@ -1,17 +1,19 @@
 //
 // JWT Handling
 //
-use crate::util::read_file;
 use chrono::{Duration, Utc};
-use once_cell::sync::Lazy;
 use num_traits::FromPrimitive;
+use once_cell::sync::Lazy;

-use jsonwebtoken::{self, Algorithm, Header, EncodingKey, DecodingKey};
+use jsonwebtoken::{self, Algorithm, DecodingKey, EncodingKey, Header};
 use serde::de::DeserializeOwned;
 use serde::ser::Serialize;

-use crate::error::{Error, MapResult};
-use crate::CONFIG;
+use crate::{
+    error::{Error, MapResult},
+    util::read_file,
+    CONFIG,
+};

 const JWT_ALGORITHM: Algorithm = Algorithm::RS256;

@@ -213,11 +215,14 @@ pub fn generate_admin_claims() -> AdminJWTClaims {
 //
 // Bearer token authentication
 //
-use rocket::request::{self, FromRequest, Request};
-use rocket::Outcome;
+use rocket::{
+    request::{FromRequest, Request, Outcome},
+};

-use crate::db::models::{Device, User, UserOrgStatus, UserOrgType, UserOrganization};
-use crate::db::DbConn;
+use crate::db::{
+    models::{Device, User, UserOrgStatus, UserOrgType, UserOrganization},
+    DbConn,
+};

 pub struct Headers {
     pub host: String,

@@ -228,7 +233,7 @@ pub struct Headers {
 impl<'a, 'r> FromRequest<'a, 'r> for Headers {
     type Error = &'static str;

-    fn from_request(request: &'a Request<'r>) -> request::Outcome<Self, Self::Error> {
+    fn from_request(request: &'a Request<'r>) -> Outcome<Self, Self::Error> {
         let headers = request.headers();

         // Get host

@@ -329,7 +334,7 @@ fn get_org_id(request: &Request) -> Option<String> {
 impl<'a, 'r> FromRequest<'a, 'r> for OrgHeaders {
     type Error = &'static str;

-    fn from_request(request: &'a Request<'r>) -> request::Outcome<Self, Self::Error> {
+    fn from_request(request: &'a Request<'r>) -> Outcome<Self, Self::Error> {
         match request.guard::<Headers>() {
             Outcome::Forward(_) => Outcome::Forward(()),
             Outcome::Failure(f) => Outcome::Failure(f),

@@ -366,7 +371,7 @@ impl<'a, 'r> FromRequest<'a, 'r> for OrgHeaders {
                     }
                 },
             })
-        },
+        }
         _ => err_handler!("Error getting the organization id"),
     }
 }

@@ -384,7 +389,7 @@ pub struct AdminHeaders {
 impl<'a, 'r> FromRequest<'a, 'r> for AdminHeaders {
     type Error = &'static str;

-    fn from_request(request: &'a Request<'r>) -> request::Outcome<Self, Self::Error> {
+    fn from_request(request: &'a Request<'r>) -> Outcome<Self, Self::Error> {
         match request.guard::<OrgHeaders>() {
             Outcome::Forward(_) => Outcome::Forward(()),
             Outcome::Failure(f) => Outcome::Failure(f),

@@ -409,9 +414,9 @@ impl Into<Headers> for AdminHeaders {
         Headers {
             host: self.host,
             device: self.device,
-            user: self.user
+            user: self.user,
         }
     }
 }

 pub struct OwnerHeaders {

@@ -423,7 +428,7 @@ pub struct OwnerHeaders {
 impl<'a, 'r> FromRequest<'a, 'r> for OwnerHeaders {
     type Error = &'static str;

-    fn from_request(request: &'a Request<'r>) -> request::Outcome<Self, Self::Error> {
+    fn from_request(request: &'a Request<'r>) -> Outcome<Self, Self::Error> {
         match request.guard::<OrgHeaders>() {
             Outcome::Forward(_) => Outcome::Forward(()),
             Outcome::Failure(f) => Outcome::Failure(f),

@@ -454,7 +459,7 @@ pub struct ClientIp {
 impl<'a, 'r> FromRequest<'a, 'r> for ClientIp {
     type Error = ();

-    fn from_request(req: &'a Request<'r>) -> request::Outcome<Self, Self::Error> {
+    fn from_request(req: &'a Request<'r>) -> Outcome<Self, Self::Error> {
         let ip = if CONFIG._ip_header_enabled() {
             req.headers().get_one(&CONFIG.ip_header()).and_then(|ip| {
                 match ip.find(',') {
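Note: the auth.rs hunks above only change how `Outcome` is imported and referenced; the guard logic is unchanged. For context, a hedged minimal Rocket 0.4-style request guard (the `RequireHost` type is illustrative only), showing the `Outcome` variants the real guards use:

use rocket::http::Status;
use rocket::request::{FromRequest, Outcome, Request};

struct RequireHost(String);

impl<'a, 'r> FromRequest<'a, 'r> for RequireHost {
    type Error = &'static str;

    fn from_request(request: &'a Request<'r>) -> Outcome<Self, Self::Error> {
        match request.headers().get_one("Host") {
            // Success hands the extracted value to the route handler.
            Some(host) => Outcome::Success(RequireHost(host.to_string())),
            // Failure rejects the request, much like err_handler! does above.
            None => Outcome::Failure((Status::BadRequest, "Missing Host header")),
        }
    }
}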

View File

@@ -1,11 +1,13 @@
+use once_cell::sync::Lazy;
 use std::process::exit;
 use std::sync::RwLock;

-use once_cell::sync::Lazy;
 use reqwest::Url;

-use crate::error::Error;
-use crate::util::{get_env, get_env_bool};
+use crate::{
+    error::Error,
+    util::{get_env, get_env_bool},
+};

 static CONFIG_FILE: Lazy<String> = Lazy::new(|| {
     let data_folder = get_env("DATA_FOLDER").unwrap_or_else(|| String::from("data"));

@@ -327,6 +329,8 @@ make_config! {
         reload_templates: bool, true, def, false;
         /// Enable extended logging
         extended_logging: bool, false, def, true;
+        /// Log timestamp format
+        log_timestamp_format: String, true, def, "%Y-%m-%d %H:%M:%S.%3f".to_string();
         /// Enable the log to output to Syslog
         use_syslog: bool, false, def, false;
         /// Log file path

@@ -394,7 +398,9 @@ make_config! {
         /// Json form auth mechanism |> Defaults for ssl is "Plain" and "Login" and nothing for non-ssl connections. Possible values: ["Plain", "Login", "Xoauth2"]
         smtp_auth_mechanism: String, true, option;
         /// SMTP connection timeout |> Number of seconds when to stop trying to connect to the SMTP server
         smtp_timeout: u64, true, def, 15;
+        /// Server name sent during HELO |> By default this value should be is on the machine's hostname, but might need to be changed in case it trips some anti-spam filters
+        helo_name: String, true, option;
     },

     /// Email 2FA Settings

@@ -412,7 +418,9 @@ make_config! {
 fn validate_config(cfg: &ConfigItems) -> Result<(), Error> {
     let db_url = cfg.database_url.to_lowercase();

-    if cfg!(feature = "sqlite") && (db_url.starts_with("mysql:") || db_url.starts_with("postgresql:")) {
+    if cfg!(feature = "sqlite")
+        && (db_url.starts_with("mysql:") || db_url.starts_with("postgresql:") || db_url.starts_with("postgres:"))
+    {
         err!("`DATABASE_URL` is meant for MySQL or Postgres, while this server is meant for SQLite")
     }

@@ -420,7 +428,7 @@ fn validate_config(cfg: &ConfigItems) -> Result<(), Error> {
         err!("`DATABASE_URL` should start with mysql: when using the MySQL server")
     }

-    if cfg!(feature = "postgresql") && !db_url.starts_with("postgresql:") {
+    if cfg!(feature = "postgresql") && !(db_url.starts_with("postgresql:") || db_url.starts_with("postgres:")) {
         err!("`DATABASE_URL` should start with postgresql: when using the PostgreSQL server")
     }
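Note: the new `log_timestamp_format` option is passed straight to `chrono::Local::now().format(..)` in main.rs (see that diff further below); the default `%Y-%m-%d %H:%M:%S.%3f` adds a three-digit millisecond fraction via chrono's `%3f` specifier. A small sketch of the resulting output (the sample line is illustrative):

use chrono::Local;

fn main() {
    // Default value of the new `log_timestamp_format` option.
    let ts = Local::now().format("%Y-%m-%d %H:%M:%S.%3f");
    println!("[{}][request][INFO] sample log line", ts);
    // e.g. [2020-07-23 14:19:51.321][request][INFO] sample log line
}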

View File

@@ -1,10 +1,11 @@
 //
 // PBKDF2 derivation
 //

-use std::num::NonZeroU32;
-use ring::{digest, hmac, pbkdf2};
 use crate::error::Error;
+use ring::{digest, hmac, pbkdf2};
+use std::num::NonZeroU32;

 static DIGEST_ALG: pbkdf2::Algorithm = pbkdf2::PBKDF2_HMAC_SHA256;
 const OUTPUT_LEN: usize = digest::SHA256_OUTPUT_LEN;

View File

@@ -1,18 +1,14 @@
-use std::ops::Deref;
-
-use diesel::r2d2;
-use diesel::r2d2::ConnectionManager;
-use diesel::{Connection as DieselConnection, ConnectionError};
-use rocket::http::Status;
-use rocket::request::{self, FromRequest};
-use rocket::{Outcome, Request, State};
-
-use crate::error::Error;
-use chrono::prelude::*;
 use std::process::Command;
-use crate::CONFIG;
+
+use chrono::prelude::*;
+use diesel::{r2d2, r2d2::ConnectionManager, Connection as DieselConnection, ConnectionError};
+use rocket::{
+    http::Status,
+    request::{FromRequest, Outcome},
+    Request, State,
+};
+
+use crate::{error::Error, CONFIG};

 /// An alias to the database connection used
 #[cfg(feature = "sqlite")]

@@ -75,7 +71,7 @@ pub fn backup_database() -> Result<(), Error> {
 impl<'a, 'r> FromRequest<'a, 'r> for DbConn {
     type Error = ();

-    fn from_request(request: &'a Request<'r>) -> request::Outcome<DbConn, ()> {
+    fn from_request(request: &'a Request<'r>) -> Outcome<DbConn, ()> {
         // https://github.com/SergioBenitez/Rocket/commit/e3c1a4ad3ab9b840482ec6de4200d30df43e357c
         let pool = try_outcome!(request.guard::<State<Pool>>());
         match pool.get() {

@@ -86,7 +82,7 @@ impl<'a, 'r> FromRequest<'a, 'r> for DbConn {
 }

 // For the convenience of using an &DbConn as a &Database.
-impl Deref for DbConn {
+impl std::ops::Deref for DbConn {
     type Target = Connection;
     fn deref(&self) -> &Self::Target {
         &self.0

View File

@@ -82,6 +82,15 @@ impl Cipher {
         let fields_json = self.fields.as_ref().and_then(|s| serde_json::from_str(s).ok()).unwrap_or(Value::Null);
         let password_history_json = self.password_history.as_ref().and_then(|s| serde_json::from_str(s).ok()).unwrap_or(Value::Null);

+        let (read_only, hide_passwords) =
+            match self.get_access_restrictions(&user_uuid, &conn) {
+                Some((ro, hp)) => (ro, hp),
+                None => {
+                    error!("Cipher ownership assertion failure");
+                    (true, true)
+                },
+            };
+
         // Get the data or a default empty value to avoid issues with the mobile apps
         let mut data_json: Value = serde_json::from_str(&self.data).unwrap_or_else(|_| json!({
             "Fields":null,

@@ -105,7 +114,15 @@ impl Cipher {
         }
         // TODO: ******* Backwards compat end **********

+        // There are three types of cipher response models in upstream
+        // Bitwarden: "cipherMini", "cipher", and "cipherDetails" (in order
+        // of increasing level of detail). bitwarden_rs currently only
+        // supports the "cipherDetails" type, though it seems like the
+        // Bitwarden clients will ignore extra fields.
+        //
+        // Ref: https://github.com/bitwarden/server/blob/master/src/Core/Models/Api/Response/CipherResponseModel.cs
         let mut json_object = json!({
+            "Object": "cipherDetails",
             "Id": self.uuid,
             "Type": self.atype,
             "RevisionDate": format_date(&self.updated_at),

@@ -115,6 +132,8 @@ impl Cipher {
             "OrganizationId": self.organization_uuid,
             "Attachments": attachments_json,
             "OrganizationUseTotp": true,
+
+            // This field is specific to the cipherDetails type.
             "CollectionIds": self.get_collections(user_uuid, &conn),

             "Name": self.name,

@@ -123,8 +142,11 @@ impl Cipher {
             "Data": data_json,

-            "Object": "cipher",
-            "Edit": true,
+            // These values are true by default, but can be false if the
+            // cipher belongs to a collection where the org owner has enabled
+            // the "Read Only" or "Hide Passwords" restrictions for the user.
+            "Edit": !read_only,
+            "ViewPassword": !hide_passwords,

             "PasswordHistory": password_history_json,
         });

@@ -241,64 +263,78 @@ impl Cipher {
         }
     }

-    pub fn is_write_accessible_to_user(&self, user_uuid: &str, conn: &DbConn) -> bool {
-        ciphers::table
-            .filter(ciphers::uuid.eq(&self.uuid))
-            .left_join(
-                users_organizations::table.on(ciphers::organization_uuid
-                    .eq(users_organizations::org_uuid.nullable())
-                    .and(users_organizations::user_uuid.eq(user_uuid))),
-            )
-            .left_join(ciphers_collections::table)
-            .left_join(
-                users_collections::table
-                    .on(ciphers_collections::collection_uuid.eq(users_collections::collection_uuid)),
-            )
-            .filter(ciphers::user_uuid.eq(user_uuid).or(
-                // Cipher owner
-                users_organizations::access_all.eq(true).or(
-                    // access_all in Organization
-                    users_organizations::atype.le(UserOrgType::Admin as i32).or(
-                        // Org admin or owner
-                        users_collections::user_uuid.eq(user_uuid).and(
-                            users_collections::read_only.eq(false), //R/W access to collection
-                        ),
-                    ),
-                ),
-            ))
-            .select(ciphers::all_columns)
-            .first::<Self>(&**conn)
-            .ok()
-            .is_some()
+    /// Returns whether this cipher is directly owned by the user.
+    pub fn is_owned_by_user(&self, user_uuid: &str) -> bool {
+        self.user_uuid.is_some() && self.user_uuid.as_ref().unwrap() == user_uuid
+    }
+
+    /// Returns whether this cipher is owned by an org in which the user has full access.
+    pub fn is_in_full_access_org(&self, user_uuid: &str, conn: &DbConn) -> bool {
+        if let Some(ref org_uuid) = self.organization_uuid {
+            if let Some(user_org) = UserOrganization::find_by_user_and_org(&user_uuid, &org_uuid, &conn) {
+                return user_org.has_full_access();
+            }
+        }
+        false
+    }
+
+    /// Returns the user's access restrictions to this cipher. A return value
+    /// of None means that this cipher does not belong to the user, and is
+    /// not in any collection the user has access to. Otherwise, the user has
+    /// access to this cipher, and Some(read_only, hide_passwords) represents
+    /// the access restrictions.
+    pub fn get_access_restrictions(&self, user_uuid: &str, conn: &DbConn) -> Option<(bool, bool)> {
+        // Check whether this cipher is directly owned by the user, or is in
+        // a collection that the user has full access to. If so, there are no
+        // access restrictions.
+        if self.is_owned_by_user(&user_uuid) || self.is_in_full_access_org(&user_uuid, &conn) {
+            return Some((false, false));
+        }
+
+        // Check whether this cipher is in any collections accessible to the
+        // user. If so, retrieve the access flags for each collection.
+        let query = ciphers::table
+            .filter(ciphers::uuid.eq(&self.uuid))
+            .inner_join(ciphers_collections::table.on(
+                ciphers::uuid.eq(ciphers_collections::cipher_uuid)))
+            .inner_join(users_collections::table.on(
+                ciphers_collections::collection_uuid.eq(users_collections::collection_uuid)
+                    .and(users_collections::user_uuid.eq(user_uuid))))
+            .select((users_collections::read_only, users_collections::hide_passwords));
+
+        // There's an edge case where a cipher can be in multiple collections
+        // with inconsistent access flags. For example, a cipher could be in
+        // one collection where the user has read-only access, but also in
+        // another collection where the user has read/write access. To handle
+        // this, we do a boolean OR of all values in each of the `read_only`
+        // and `hide_passwords` columns. This could ideally be done as part
+        // of the query, but Diesel doesn't support a max() or bool_or()
+        // function on booleans and this behavior isn't portable anyway.
+        if let Some(vec) = query.load::<(bool, bool)>(&**conn).ok() {
+            let mut read_only = false;
+            let mut hide_passwords = false;
+            for (ro, hp) in vec.iter() {
+                read_only |= ro;
+                hide_passwords |= hp;
+            }
+
+            Some((read_only, hide_passwords))
+        } else {
+            // This cipher isn't in any collections accessible to the user.
+            None
+        }
+    }
+
+    pub fn is_write_accessible_to_user(&self, user_uuid: &str, conn: &DbConn) -> bool {
+        match self.get_access_restrictions(&user_uuid, &conn) {
+            Some((read_only, _hide_passwords)) => !read_only,
+            None => false,
+        }
     }

     pub fn is_accessible_to_user(&self, user_uuid: &str, conn: &DbConn) -> bool {
-        ciphers::table
-            .filter(ciphers::uuid.eq(&self.uuid))
-            .left_join(
-                users_organizations::table.on(ciphers::organization_uuid
-                    .eq(users_organizations::org_uuid.nullable())
-                    .and(users_organizations::user_uuid.eq(user_uuid))),
-            )
-            .left_join(ciphers_collections::table)
-            .left_join(
-                users_collections::table
-                    .on(ciphers_collections::collection_uuid.eq(users_collections::collection_uuid)),
-            )
-            .filter(ciphers::user_uuid.eq(user_uuid).or(
-                // Cipher owner
-                users_organizations::access_all.eq(true).or(
-                    // access_all in Organization
-                    users_organizations::atype.le(UserOrgType::Admin as i32).or(
-                        // Org admin or owner
-                        users_collections::user_uuid.eq(user_uuid), // Access to Collection
-                    ),
-                ),
-            ))
-            .select(ciphers::all_columns)
-            .first::<Self>(&**conn)
-            .ok()
-            .is_some()
+        self.get_access_restrictions(&user_uuid, &conn).is_some()
     }

     pub fn get_folder_uuid(&self, user_uuid: &str, conn: &DbConn) -> Option<String> {
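Note: as the comments in `get_access_restrictions()` explain, flags from multiple collections are merged with a boolean OR, so any restricting collection restricts the cipher. A small worked example of that fold (flag values are hypothetical):

fn main() {
    // One cipher reachable through two collections with inconsistent flags:
    // (read_only, hide_passwords) per collection, as loaded by the query above.
    let flags = [(true, false), (false, true)];

    let mut read_only = false;
    let mut hide_passwords = false;
    for &(ro, hp) in flags.iter() {
        read_only |= ro;
        hide_passwords |= hp;
    }

    // The OR means the most restrictive combination wins.
    assert_eq!((read_only, hide_passwords), (true, true));
}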

View File

@@ -199,6 +199,7 @@ pub struct CollectionUser {
     pub user_uuid: String,
     pub collection_uuid: String,
     pub read_only: bool,
+    pub hide_passwords: bool,
 }

 /// Database methods

@@ -214,7 +215,7 @@ impl CollectionUser {
     }

     #[cfg(feature = "postgresql")]
-    pub fn save(user_uuid: &str, collection_uuid: &str, read_only: bool, conn: &DbConn) -> EmptyResult {
+    pub fn save(user_uuid: &str, collection_uuid: &str, read_only: bool, hide_passwords: bool, conn: &DbConn) -> EmptyResult {
         User::update_uuid_revision(&user_uuid, conn);

         diesel::insert_into(users_collections::table)

@@ -222,16 +223,20 @@ impl CollectionUser {
                 users_collections::user_uuid.eq(user_uuid),
                 users_collections::collection_uuid.eq(collection_uuid),
                 users_collections::read_only.eq(read_only),
+                users_collections::hide_passwords.eq(hide_passwords),
             ))
             .on_conflict((users_collections::user_uuid, users_collections::collection_uuid))
             .do_update()
-            .set(users_collections::read_only.eq(read_only))
+            .set((
+                users_collections::read_only.eq(read_only),
+                users_collections::hide_passwords.eq(hide_passwords),
+            ))
             .execute(&**conn)
             .map_res("Error adding user to collection")
     }

     #[cfg(not(feature = "postgresql"))]
-    pub fn save(user_uuid: &str, collection_uuid: &str, read_only: bool, conn: &DbConn) -> EmptyResult {
+    pub fn save(user_uuid: &str, collection_uuid: &str, read_only: bool, hide_passwords: bool, conn: &DbConn) -> EmptyResult {
         User::update_uuid_revision(&user_uuid, conn);

         diesel::replace_into(users_collections::table)

@@ -239,6 +244,7 @@ impl CollectionUser {
                 users_collections::user_uuid.eq(user_uuid),
                 users_collections::collection_uuid.eq(collection_uuid),
                 users_collections::read_only.eq(read_only),
+                users_collections::hide_passwords.eq(hide_passwords),
             ))
             .execute(&**conn)
             .map_res("Error adding user to collection")

View File

@@ -310,10 +310,11 @@ impl UserOrganization {
         })
     }

-    pub fn to_json_read_only(&self, read_only: bool) -> Value {
+    pub fn to_json_user_access_restrictions(&self, col_user: &CollectionUser) -> Value {
         json!({
             "Id": self.uuid,
-            "ReadOnly": read_only
+            "ReadOnly": col_user.read_only,
+            "HidePasswords": col_user.hide_passwords,
         })
     }

@@ -324,7 +325,11 @@ impl UserOrganization {
             let collections = CollectionUser::find_by_organization_and_user_uuid(&self.org_uuid, &self.user_uuid, conn);
             collections
                 .iter()
-                .map(|c| json!({"Id": c.collection_uuid, "ReadOnly": c.read_only}))
+                .map(|c| json!({
+                    "Id": c.collection_uuid,
+                    "ReadOnly": c.read_only,
+                    "HidePasswords": c.hide_passwords,
+                }))
                 .collect()
         };

@@ -388,8 +393,13 @@ impl UserOrganization {
         Ok(())
     }

+    pub fn has_status(self, status: UserOrgStatus) -> bool {
+        self.status == status as i32
+    }
+
     pub fn has_full_access(self) -> bool {
-        self.access_all || self.atype >= UserOrgType::Admin
+        (self.access_all || self.atype >= UserOrgType::Admin) &&
+            self.has_status(UserOrgStatus::Confirmed)
     }

     pub fn find_by_uuid(uuid: &str, conn: &DbConn) -> Option<Self> {

View File

@@ -141,6 +141,7 @@ table! {
         user_uuid -> Varchar,
         collection_uuid -> Varchar,
         read_only -> Bool,
+        hide_passwords -> Bool,
     }
 }

View File

@@ -141,6 +141,7 @@ table! {
         user_uuid -> Text,
         collection_uuid -> Text,
         read_only -> Bool,
+        hide_passwords -> Bool,
     }
 }

View File

@@ -141,6 +141,7 @@ table! {
         user_uuid -> Text,
         collection_uuid -> Text,
         read_only -> Bool,
+        hide_passwords -> Bool,
     }
 }

View File

@@ -41,7 +41,6 @@ use reqwest::Error as ReqErr;
 use serde_json::{Error as SerdeErr, Value};
 use std::io::Error as IOErr;

-use std::option::NoneError as NoneErr;
 use std::time::SystemTimeError as TimeErr;
 use u2f::u2ferror::U2fError as U2fErr;
 use yubico::yubicoerror::YubicoError as YubiErr;

@@ -84,13 +83,6 @@ make_error! {
     FromStrError(FromStrErr): _has_source, _api_error,
 }

-// This is implemented by hand because NoneError doesn't implement neither Display nor Error
-impl From<NoneErr> for Error {
-    fn from(_: NoneErr) -> Self {
-        Error::from(("NoneError", String::new()))
-    }
-}
-
 impl std::fmt::Debug for Error {
     fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
         match self.source() {

@@ -241,10 +233,10 @@ macro_rules! err_json {
 macro_rules! err_handler {
     ($expr:expr) => {{
         error!(target: "auth", "Unauthorized Error: {}", $expr);
-        return rocket::Outcome::Failure((rocket::http::Status::Unauthorized, $expr));
+        return ::rocket::request::Outcome::Failure((rocket::http::Status::Unauthorized, $expr));
     }};
     ($usr_msg:expr, $log_value:expr) => {{
         error!(target: "auth", "Unauthorized Error: {}. {}", $usr_msg, $log_value);
-        return rocket::Outcome::Failure((rocket::http::Status::Unauthorized, $usr_msg));
+        return ::rocket::request::Outcome::Failure((rocket::http::Status::Unauthorized, $usr_msg));
     }};
 }

View File

@@ -1,17 +1,23 @@
-use std::str::FromStr;
+use std::{env, str::FromStr};

-use lettre::message::{header, Mailbox, Message, MultiPart, SinglePart};
-use lettre::transport::smtp::authentication::{Credentials, Mechanism as SmtpAuthMechanism};
-use lettre::{Address, SmtpTransport, Tls, TlsParameters, Transport};
+use chrono::{DateTime, Local};
+use chrono_tz::Tz;
 use native_tls::{Protocol, TlsConnector};
 use percent_encoding::{percent_encode, NON_ALPHANUMERIC};

-use crate::api::EmptyResult;
-use crate::auth::{encode_jwt, generate_delete_claims, generate_invite_claims, generate_verify_email_claims};
-use crate::error::Error;
-use crate::CONFIG;
-use chrono::NaiveDateTime;
+use lettre::{
+    message::{header, Mailbox, Message, MultiPart, SinglePart},
+    transport::smtp::authentication::{Credentials, Mechanism as SmtpAuthMechanism},
+    transport::smtp::extension::ClientId,
+    Address, SmtpTransport, Tls, TlsParameters, Transport,
+};
+
+use crate::{
+    api::EmptyResult,
+    auth::{encode_jwt, generate_delete_claims, generate_invite_claims, generate_verify_email_claims},
+    error::Error,
+    CONFIG,
+};

 fn mailer() -> SmtpTransport {
     let host = CONFIG.smtp_host().unwrap();

@@ -42,6 +48,11 @@ fn mailer() -> SmtpTransport {
         _ => smtp_client,
     };

+    let smtp_client = match CONFIG.helo_name() {
+        Some(helo_name) => smtp_client.hello_name(ClientId::new(helo_name)),
+        None => smtp_client,
+    };
+
     let smtp_client = match CONFIG.smtp_auth_mechanism() {
         Some(mechanism) => {
             let correct_mechanism = format!("\"{}\"", crate::util::upcase_first(mechanism.trim_matches('"')));

@@ -81,6 +92,22 @@ fn get_template(template_name: &str, data: &serde_json::Value) -> Result<(String
     Ok((subject, body))
 }

+pub fn format_datetime(dt: &DateTime<Local>) -> String {
+    let fmt = "%A, %B %_d, %Y at %r %Z";
+
+    // With a DateTime<Local>, `%Z` formats as the time zone's UTC offset
+    // (e.g., `+00:00`). If the `TZ` environment variable is set, try to
+    // format as a time zone abbreviation instead (e.g., `UTC`).
+    if let Ok(tz) = env::var("TZ") {
+        if let Ok(tz) = tz.parse::<Tz>() {
+            return dt.with_timezone(&tz).format(fmt).to_string();
+        }
+    }
+
+    // Otherwise, fall back to just displaying the UTC offset.
+    dt.format(fmt).to_string()
+}
+
 pub fn send_password_hint(address: &str, hint: Option<String>) -> EmptyResult {
     let template_name = if hint.is_some() {
         "email/pw_hint_some"

@@ -211,19 +238,17 @@ pub fn send_invite_confirmed(address: &str, org_name: &str) -> EmptyResult {
     send_email(address, &subject, &body_html, &body_text)
 }

-pub fn send_new_device_logged_in(address: &str, ip: &str, dt: &NaiveDateTime, device: &str) -> EmptyResult {
+pub fn send_new_device_logged_in(address: &str, ip: &str, dt: &DateTime<Local>, device: &str) -> EmptyResult {
     use crate::util::upcase_first;
     let device = upcase_first(device);

-    let datetime = dt.format("%A, %B %_d, %Y at %H:%M").to_string();
-
     let (subject, body_html, body_text) = get_text(
         "email/new_device_logged_in",
         json!({
             "url": CONFIG.domain(),
             "ip": ip,
             "device": device,
-            "datetime": datetime,
+            "datetime": format_datetime(dt),
         }),
     )?;

View File

@@ -1,5 +1,5 @@
 #![forbid(unsafe_code)]
-#![feature(proc_macro_hygiene, try_trait, ip)]
+#![cfg_attr(feature = "unstable", feature(ip))]
 #![recursion_limit = "256"]
 
 extern crate openssl;
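
A hedged sketch of the stable-vs-nightly split this gate enables; the is_global() helper and its stable fallback checks are illustrative, not the patch's actual reimplementation:

    use std::net::IpAddr;

    // With `--features unstable` on nightly, feature(ip) unlocks the
    // built-in classification method.
    #[cfg(feature = "unstable")]
    fn is_global(ip: IpAddr) -> bool {
        ip.is_global()
    }

    // On stable, a hand-rolled approximation (illustrative) takes its place.
    #[cfg(not(feature = "unstable"))]
    fn is_global(ip: IpAddr) -> bool {
        !ip.is_loopback() && !ip.is_unspecified() && !ip.is_multicast()
    }

    fn main() {
        println!("{}", is_global("127.0.0.1".parse().unwrap())); // false
    }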
@@ -17,11 +17,13 @@ extern crate diesel;
 extern crate diesel_migrations;
 
 use std::{
+    fmt, // For panic logging
     fs::create_dir_all,
+    panic,
     path::Path,
     process::{exit, Command},
     str::FromStr,
-    panic, thread, fmt // For panic logging
+    thread,
 };
 #[macro_use]
@@ -128,8 +130,8 @@ fn init_logging(level: log::LevelFilter) -> Result<(), fern::InitError> {
     if CONFIG.extended_logging() {
         logger = logger.format(|out, message, record| {
             out.finish(format_args!(
-                "{}[{}][{}] {}",
-                chrono::Local::now().format("[%Y-%m-%d %H:%M:%S]"),
+                "[{}][{}][{}] {}",
+                chrono::Local::now().format(&CONFIG.log_timestamp_format()),
                 record.target(),
                 record.level(),
                 message
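
A small sketch of what the new knob changes, assuming a chrono-style format string; the millisecond specifier and sample output are illustrative, and the actual default lives in the config definition outside this hunk:

    use chrono::Local;

    fn main() {
        // Old behavior: a hard-coded, second-granularity prefix, e.g.
        // "[2020-07-23 14:19:51]".
        println!("{}", Local::now().format("[%Y-%m-%d %H:%M:%S]"));
        // With the format set to e.g. "%Y-%m-%d %H:%M:%S.%3f", log lines
        // gain millisecond precision: "2020-07-23 14:19:51.420".
        println!("{}", Local::now().format("%Y-%m-%d %H:%M:%S.%3f"));
    }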
@@ -178,15 +180,13 @@ fn init_logging(level: log::LevelFilter) -> Result<(), fern::InitError> {
                     Shim(backtrace)
                 );
             }
-            None => {
-                error!(
-                    target: "panic",
-                    "thread '{}' panicked at '{}'{:?}",
-                    thread,
-                    msg,
-                    Shim(backtrace)
-                )
-            }
+            None => error!(
+                target: "panic",
+                "thread '{}' panicked at '{}'{:?}",
+                thread,
+                msg,
+                Shim(backtrace)
+            ),
         }
     }));
@@ -336,14 +336,11 @@ mod migrations {
 }
 
 fn launch_rocket(extra_debug: bool) {
-    // Create Rocket object, this stores current log level and sets its own
-    let rocket = rocket::ignite();
-
     let basepath = &CONFIG.domain_path();
 
     // If adding more paths here, consider also adding them to
     // crate::utils::LOGGED_ROUTES to make sure they appear in the log
-    let rocket = rocket
+    let result = rocket::ignite()
         .mount(&[basepath, "/"].concat(), api::web_routes())
         .mount(&[basepath, "/api"].concat(), api::core_routes())
         .mount(&[basepath, "/admin"].concat(), api::admin_routes())
@@ -354,9 +351,10 @@ fn launch_rocket(extra_debug: bool) {
         .manage(api::start_notification_server())
         .attach(util::AppHeaders())
         .attach(util::CORS())
-        .attach(util::BetterLogging(extra_debug));
+        .attach(util::BetterLogging(extra_debug))
+        .launch();
 
     // Launch and print error if there is one
     // The launch will restore the original logging level
-    error!("Launch error {:#?}", rocket.launch());
+    error!("Launch error {:#?}", result);
 }

View File

@@ -1,12 +1,15 @@
 //
 // Web Headers and caching
 //
-use rocket::fairing::{Fairing, Info, Kind};
-use rocket::http::{ContentType, Header, HeaderMap, Method, Status};
-use rocket::response::{self, Responder};
-use rocket::{Data, Request, Response, Rocket};
 use std::io::Cursor;
 
+use rocket::{
+    fairing::{Fairing, Info, Kind},
+    http::{ContentType, Header, HeaderMap, Method, Status},
+    response::{self, Responder},
+    Data, Request, Response, Rocket,
+};
+
 use crate::CONFIG;
 
 pub struct AppHeaders();
@@ -157,9 +160,7 @@ impl Fairing for BetterLogging {
         }
         let uri = request.uri();
         let uri_path = uri.path();
-        // FIXME: trim_start_matches() could result in over-trimming in pathological cases;
-        // strip_prefix() would be a better option once it's stable.
-        let uri_subpath = uri_path.trim_start_matches(&CONFIG.domain_path());
+        let uri_subpath = uri_path.strip_prefix(&CONFIG.domain_path()).unwrap_or(uri_path);
         if self.0 || LOGGED_ROUTES.iter().any(|r| uri_subpath.starts_with(r)) {
             match uri.query() {
                 Some(q) => info!(target: "request", "{} {}?{}", method, uri_path, &q[..q.len().min(30)]),
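
The over-trimming the removed FIXME warned about is easy to demonstrate; a minimal sketch with an illustrative domain path:

    fn main() {
        // trim_start_matches() strips the pattern *repeatedly*, so a domain
        // path of "/vw" eats into a URI that happens to repeat it.
        let path = "/vw/vw/icons";
        assert_eq!(path.trim_start_matches("/vw"), "/icons");
        // strip_prefix() (stable since Rust 1.45) removes at most one
        // occurrence; unwrap_or() keeps the path when the prefix is absent.
        assert_eq!(path.strip_prefix("/vw").unwrap_or(path), "/vw/icons");
        assert_eq!("/other".strip_prefix("/vw").unwrap_or("/other"), "/other");
    }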
@@ -172,9 +173,8 @@ impl Fairing for BetterLogging {
         if !self.0 && request.method() == Method::Options {
             return;
         }
-        // FIXME: trim_start_matches() could result in over-trimming in pathological cases;
-        // strip_prefix() would be a better option once it's stable.
-        let uri_subpath = request.uri().path().trim_start_matches(&CONFIG.domain_path());
+        let uri_path = request.uri().path();
+        let uri_subpath = uri_path.strip_prefix(&CONFIG.domain_path()).unwrap_or(uri_path);
         if self.0 || LOGGED_ROUTES.iter().any(|r| uri_subpath.starts_with(r)) {
             let status = response.status();
             if let Some(route) = request.route() {
@@ -189,9 +189,11 @@ impl Fairing for BetterLogging {
 //
 // File handling
 //
-use std::fs::{self, File};
-use std::io::{Read, Result as IOResult};
-use std::path::Path;
+use std::{
+    fs::{self, File},
+    io::{Read, Result as IOResult},
+    path::Path,
+};
 
 pub fn file_exists(path: &str) -> bool {
     Path::new(path).exists()
@@ -253,7 +255,6 @@ pub fn get_uuid() -> String {
 // String util methods
 //
-use std::ops::Try;
 use std::str::FromStr;
 
 pub fn upcase_first(s: &str) -> String {
@@ -264,12 +265,12 @@ pub fn upcase_first(s: &str) -> String {
     }
 }
 
-pub fn try_parse_string<S, T, U>(string: impl Try<Ok = S, Error = U>) -> Option<T>
+pub fn try_parse_string<S, T>(string: Option<S>) -> Option<T>
 where
     S: AsRef<str>,
     T: FromStr,
 {
-    if let Ok(Ok(value)) = string.into_result().map(|s| s.as_ref().parse::<T>()) {
+    if let Some(Ok(value)) = string.map(|s| s.as_ref().parse::<T>()) {
         Some(value)
     } else {
         None
@@ -286,7 +287,7 @@ pub fn get_env<V>(key: &str) -> Option<V>
 where
     V: FromStr,
 {
-    try_parse_string(env::var(key))
+    try_parse_string(env::var(key).ok())
 }
 
 const TRUE_VALUES: &[&str] = &["true", "t", "yes", "y", "1"];
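
With the unstable Try trait gone, the helper is plain Option plumbing; a usage sketch, with the environment key and sample values illustrative:

    use std::str::FromStr;

    fn try_parse_string<S, T>(string: Option<S>) -> Option<T>
    where
        S: AsRef<str>,
        T: FromStr,
    {
        if let Some(Ok(value)) = string.map(|s| s.as_ref().parse::<T>()) {
            Some(value)
        } else {
            None
        }
    }

    fn main() {
        // env::var() returns Result<String, VarError>; .ok() adapts it to
        // the Option-based signature, exactly as get_env() now does.
        let port: Option<u16> = try_parse_string(std::env::var("ROCKET_PORT").ok());
        println!("{:?}", port);
        // Absent and unparseable inputs both collapse to None.
        assert_eq!(try_parse_string::<&str, u16>(None), None);
        assert_eq!(try_parse_string::<_, u16>(Some("not-a-port")), None);
    }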