Mirror of https://github.com/dani-garcia/vaultwarden.git (synced 2025-09-11 19:25:56 +03:00)
Compare commits
54 Commits
Commit SHA1s:
e3feba2a2c, 0a68de6c24, 4be8dae626, 77f95146d6, 6cd8512bbd, 843604c9e7,
7407b8326a, adf47827c9, 5471088e93, 4e85a1dee1, ec60839064, d4bfa1a189,
862d401077, 255a06382d, bbb0484d03, 93346bc05d, fdf50f0064, ccf6ee79d0,
91dd19473d, c06162b22f, 7a6a3e4160, 94341f9f3f, ff19fb3426, baac8d9627,
669b101e6a, 935f38692f, d2d9fb08cc, b85d548879, 35f30088b2, dce054e632,
ba725e1c25, b837348b25, 7d9c7017c9, d6b9b8bf0c, bd09fe1a3d, bcbe6177b8,
9b1d07365e, 37b212427c, 078234d8b3, 3ce0c3d1a5, 2ee07ea1d8, 40c339db9b,
402c1cd06c, 819f340f39, 1b4b40c95d, afd9f4e278, 47a9461f39, c6f64d8368,
edabf19ddf, a30d5f4cf9, 3fa78e7bb1, a8a7e4f9a5, 5d3b765a23, 7439aeb63e
Cargo.lock (generated): 1129 changes
File diff suppressed because it is too large.
Cargo.toml: 36 changes
@@ -26,7 +26,7 @@ rocket = { version = "0.5.0-dev", features = ["tls"], default-features = false }
 rocket_contrib = "0.5.0-dev"
 
 # HTTP client
-reqwest = "0.9.24"
+reqwest = { version = "0.10.4", features = ["blocking", "json"] }
 
 # multipart/form-data support
 multipart = { version = "0.16.1", features = ["server"], default-features = false }
@@ -41,29 +41,30 @@ rmpv = "0.4.4"
 chashmap = "2.2.2"
 
 # A generic serialization/deserialization framework
-serde = "1.0.104"
+serde = "1.0.106"
-serde_derive = "1.0.104"
+serde_derive = "1.0.106"
-serde_json = "1.0.48"
+serde_json = "1.0.51"
 
 # Logging
 log = "0.4.8"
 fern = { version = "0.6.0", features = ["syslog-4"] }
 
 # A safe, extensible ORM and Query builder
-diesel = { version = "1.4.3", features = [ "chrono", "r2d2"] }
+diesel = { version = "1.4.4", features = [ "chrono", "r2d2"] }
 diesel_migrations = "1.4.0"
 
 # Bundled SQLite
-libsqlite3-sys = { version = "0.16.0", features = ["bundled"], optional = true }
+libsqlite3-sys = { version = "0.17.3", features = ["bundled"], optional = true }
 
 # Crypto library
-ring = "0.14.6"
+ring = "0.16.12"
 
 # UUID generation
 uuid = { version = "0.8.1", features = ["v4"] }
 
-# Date and time library for Rust
+# Date and time librar for Rust
 chrono = "0.4.11"
+time = "0.2.9"
 
 # TOTP library
 oath = "0.10.2"
@@ -72,13 +73,13 @@ oath = "0.10.2"
 data-encoding = "2.2.0"
 
 # JWT library
-jsonwebtoken = "6.0.1"
+jsonwebtoken = "7.1.0"
 
 # U2F library
 u2f = "0.2.0"
 
 # Yubico Library
-yubico = { version = "0.7.1", features = ["online-tokio"], default-features = false }
+yubico = { version = "0.9.0", features = ["online-tokio"], default-features = false }
 
 # A `dotenv` implementation for Rust
 dotenv = { version = "0.15.0", default-features = false }
@@ -87,7 +88,7 @@ dotenv = { version = "0.15.0", default-features = false }
 once_cell = "1.3.1"
 
 # More derives
-derive_more = "0.99.3"
+derive_more = "0.99.5"
 
 # Numerical libraries
 num-traits = "0.2.11"
@@ -103,11 +104,11 @@ handlebars = { version = "3.0.1", features = ["dir_source"] }
 
 # For favicon extraction from main website
 soup = "0.5.0"
-regex = "1.3.4"
+regex = "1.3.6"
 data-url = "0.1.0"
 
 # Used by U2F, JWT and Postgres
-openssl = "0.10.28"
+openssl = "0.10.29"
 
 # URL encoding library
 percent-encoding = "2.1.0"
@@ -115,12 +116,15 @@ percent-encoding = "2.1.0"
 idna = "0.2.0"
 
 # CLI argument parsing
-structopt = "0.3.11"
+structopt = "0.3.13"
 
+# Logging panics to logfile instead stderr only
+backtrace = "0.3.46"
+
 [patch.crates-io]
 # Use newest ring
-rocket = { git = 'https://github.com/SergioBenitez/Rocket', rev = 'b95b6765e1cc8be7c1e7eaef8a9d9ad940b0ac13' }
+rocket = { git = 'https://github.com/SergioBenitez/Rocket', rev = 'dfc9e9aab01d349da32c52db393e35b7fffea63c' }
-rocket_contrib = { git = 'https://github.com/SergioBenitez/Rocket', rev = 'b95b6765e1cc8be7c1e7eaef8a9d9ad940b0ac13' }
+rocket_contrib = { git = 'https://github.com/SergioBenitez/Rocket', rev = 'dfc9e9aab01d349da32c52db393e35b7fffea63c' }
 
 # Use git version for timeout fix #706
 lettre = { git = 'https://github.com/lettre/lettre', rev = '245c600c82ee18b766e8729f005ff453a55dce34' }

build.rs: 16 changes
@@ -1,4 +1,5 @@
 use std::process::Command;
+use std::env;
 
 fn main() {
     #[cfg(all(feature = "sqlite", feature = "mysql"))]
@@ -10,8 +11,13 @@ fn main() {
 
     #[cfg(not(any(feature = "sqlite", feature = "mysql", feature = "postgresql")))]
     compile_error!("You need to enable one DB backend. To build with previous defaults do: cargo build --features sqlite");
 
-    read_git_info().ok();
+    if let Ok(version) = env::var("BWRS_VERSION") {
+        println!("cargo:rustc-env=BWRS_VERSION={}", version);
+        println!("cargo:rustc-env=CARGO_PKG_VERSION={}", version);
+    } else {
+        read_git_info().ok();
+    }
 }
 
 fn run(args: &[&str]) -> Result<String, std::io::Error> {
@@ -54,14 +60,16 @@ fn read_git_info() -> Result<(), std::io::Error> {
     } else {
         format!("{}-{}", last_tag, rev_short)
     };
-    println!("cargo:rustc-env=GIT_VERSION={}", version);
+    println!("cargo:rustc-env=BWRS_VERSION={}", version);
+    println!("cargo:rustc-env=CARGO_PKG_VERSION={}", version);
 
     // To access these values, use:
     // env!("GIT_EXACT_TAG")
     // env!("GIT_LAST_TAG")
     // env!("GIT_BRANCH")
     // env!("GIT_REV")
-    // env!("GIT_VERSION")
+    // env!("BWRS_VERSION")
 
     Ok(())
 }

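The build.rs change above lets a BWRS_VERSION environment variable take precedence over the git-derived version string. A minimal usage sketch (the version number below is a made-up placeholder):

  # Override the reported version, e.g. when building from a source tarball without .git:
  BWRS_VERSION=1.14.0 cargo build --release --features sqlite

  # Without BWRS_VERSION set, build.rs falls back to read_git_info() as before:
  cargo build --release --features sqlite
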
@@ -27,17 +27,17 @@
 # https://docs.docker.com/develop/develop-images/multistage-build/
 # https://whitfin.io/speeding-up-rust-docker-builds/
 ####################### VAULT BUILD IMAGE #######################
-{% set vault_image_hash = "sha256:ce56b3f5e538351411785ac45e9b4b913259c3508b1323d62e8fa0f30717dd1c" %}
+{% set vault_image_hash = "sha256:f32c555a2bc3ee6bc0718319b1e8057c10ef889cf7231f0ff217af98486da554" %}
 {% raw %}
 # This hash is extracted from the docker web-vault builds and it's prefered over a simple tag because it's immutable.
 # It can be viewed in multiple ways:
 # - From the https://hub.docker.com/repository/docker/bitwardenrs/web-vault/tags page, click the tag name and the digest should be there.
 # - From the console, with the following commands:
-# docker pull bitwardenrs/web-vault:v2.12.0e
+# docker pull bitwardenrs/web-vault:v2.13.2b
-# docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.12.0e
+# docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.13.2b
 #
 # - To do the opposite, and get the tag from the hash, you can do:
-# docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:ce56b3f5e538351411785ac45e9b4b913259c3508b1323d62e8fa0f30717dd1c
+# docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:f32c555a2bc3ee6bc0718319b1e8057c10ef889cf7231f0ff217af98486da554
 {% endraw %}
 FROM bitwardenrs/web-vault@{{ vault_image_hash }} as vault
 
@@ -71,6 +71,7 @@ RUN rustup set profile minimal
 
 {% if "alpine" in target_file %}
 ENV USER "root"
+ENV RUSTFLAGS='-C link-arg=-s'
 
 {% elif "aarch64" in target_file or "armv" in target_file %}
 # Install required build libs for {{ package_arch_name }} architecture.
@@ -286,9 +287,9 @@ COPY --from=build /app/target/armv7-unknown-linux-gnueabihf/release/bitwarden_rs
 COPY --from=build app/target/release/bitwarden_rs .
 {% endif %}
 
-COPY docker/healthcheck.sh ./healthcheck.sh
+COPY docker/healthcheck.sh /healthcheck.sh
 
-HEALTHCHECK --interval=30s --timeout=3s CMD sh healthcheck.sh || exit 1
+HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]
 
 # Configures the startup!
 WORKDIR /

@@ -10,12 +10,12 @@
 # It can be viewed in multiple ways:
 # - From the https://hub.docker.com/repository/docker/bitwardenrs/web-vault/tags page, click the tag name and the digest should be there.
 # - From the console, with the following commands:
-# docker pull bitwardenrs/web-vault:v2.12.0e
+# docker pull bitwardenrs/web-vault:v2.13.2b
-# docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.12.0e
+# docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.13.2b
 #
 # - To do the opposite, and get the tag from the hash, you can do:
-# docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:ce56b3f5e538351411785ac45e9b4b913259c3508b1323d62e8fa0f30717dd1c
+# docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:f32c555a2bc3ee6bc0718319b1e8057c10ef889cf7231f0ff217af98486da554
-FROM bitwardenrs/web-vault@sha256:ce56b3f5e538351411785ac45e9b4b913259c3508b1323d62e8fa0f30717dd1c as vault
+FROM bitwardenrs/web-vault@sha256:f32c555a2bc3ee6bc0718319b1e8057c10ef889cf7231f0ff217af98486da554 as vault
 
 ########################## BUILD IMAGE ##########################
 # We need to use the Rust build image, because
@@ -124,9 +124,9 @@ COPY Rocket.toml .
 COPY --from=vault /web-vault ./web-vault
 COPY --from=build /app/target/aarch64-unknown-linux-gnu/release/bitwarden_rs .
 
-COPY docker/healthcheck.sh ./healthcheck.sh
+COPY docker/healthcheck.sh /healthcheck.sh
 
-HEALTHCHECK --interval=30s --timeout=3s CMD sh healthcheck.sh || exit 1
+HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]
 
 # Configures the startup!
 WORKDIR /

@@ -10,12 +10,12 @@
 # It can be viewed in multiple ways:
 # - From the https://hub.docker.com/repository/docker/bitwardenrs/web-vault/tags page, click the tag name and the digest should be there.
 # - From the console, with the following commands:
-# docker pull bitwardenrs/web-vault:v2.12.0e
+# docker pull bitwardenrs/web-vault:v2.13.2b
-# docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.12.0e
+# docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.13.2b
 #
 # - To do the opposite, and get the tag from the hash, you can do:
-# docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:ce56b3f5e538351411785ac45e9b4b913259c3508b1323d62e8fa0f30717dd1c
+# docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:f32c555a2bc3ee6bc0718319b1e8057c10ef889cf7231f0ff217af98486da554
-FROM bitwardenrs/web-vault@sha256:ce56b3f5e538351411785ac45e9b4b913259c3508b1323d62e8fa0f30717dd1c as vault
+FROM bitwardenrs/web-vault@sha256:f32c555a2bc3ee6bc0718319b1e8057c10ef889cf7231f0ff217af98486da554 as vault
 
 ########################## BUILD IMAGE ##########################
 # We need to use the Rust build image, because
@@ -118,9 +118,9 @@ COPY Rocket.toml .
 COPY --from=vault /web-vault ./web-vault
 COPY --from=build /app/target/aarch64-unknown-linux-gnu/release/bitwarden_rs .
 
-COPY docker/healthcheck.sh ./healthcheck.sh
+COPY docker/healthcheck.sh /healthcheck.sh
 
-HEALTHCHECK --interval=30s --timeout=3s CMD sh healthcheck.sh || exit 1
+HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]
 
 # Configures the startup!
 WORKDIR /

@@ -10,12 +10,12 @@
 # It can be viewed in multiple ways:
 # - From the https://hub.docker.com/repository/docker/bitwardenrs/web-vault/tags page, click the tag name and the digest should be there.
 # - From the console, with the following commands:
-# docker pull bitwardenrs/web-vault:v2.12.0e
+# docker pull bitwardenrs/web-vault:v2.13.2b
-# docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.12.0e
+# docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.13.2b
 #
 # - To do the opposite, and get the tag from the hash, you can do:
-# docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:ce56b3f5e538351411785ac45e9b4b913259c3508b1323d62e8fa0f30717dd1c
+# docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:f32c555a2bc3ee6bc0718319b1e8057c10ef889cf7231f0ff217af98486da554
-FROM bitwardenrs/web-vault@sha256:ce56b3f5e538351411785ac45e9b4b913259c3508b1323d62e8fa0f30717dd1c as vault
+FROM bitwardenrs/web-vault@sha256:f32c555a2bc3ee6bc0718319b1e8057c10ef889cf7231f0ff217af98486da554 as vault
 
 ########################## BUILD IMAGE ##########################
 # We need to use the Rust build image, because
@@ -92,9 +92,9 @@ COPY Rocket.toml .
 COPY --from=vault /web-vault ./web-vault
 COPY --from=build app/target/release/bitwarden_rs .
 
-COPY docker/healthcheck.sh ./healthcheck.sh
+COPY docker/healthcheck.sh /healthcheck.sh
 
-HEALTHCHECK --interval=30s --timeout=3s CMD sh healthcheck.sh || exit 1
+HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]
 
 # Configures the startup!
 WORKDIR /

@@ -10,16 +10,16 @@
 # It can be viewed in multiple ways:
 # - From the https://hub.docker.com/repository/docker/bitwardenrs/web-vault/tags page, click the tag name and the digest should be there.
 # - From the console, with the following commands:
-# docker pull bitwardenrs/web-vault:v2.12.0e
+# docker pull bitwardenrs/web-vault:v2.13.2b
-# docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.12.0e
+# docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.13.2b
 #
 # - To do the opposite, and get the tag from the hash, you can do:
-# docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:ce56b3f5e538351411785ac45e9b4b913259c3508b1323d62e8fa0f30717dd1c
+# docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:f32c555a2bc3ee6bc0718319b1e8057c10ef889cf7231f0ff217af98486da554
-FROM bitwardenrs/web-vault@sha256:ce56b3f5e538351411785ac45e9b4b913259c3508b1323d62e8fa0f30717dd1c as vault
+FROM bitwardenrs/web-vault@sha256:f32c555a2bc3ee6bc0718319b1e8057c10ef889cf7231f0ff217af98486da554 as vault
 
 ########################## BUILD IMAGE ##########################
 # Musl build image for statically compiled binary
-FROM clux/muslrust:nightly-2019-12-19 as build
+FROM clux/muslrust:nightly-2020-03-09 as build
 
 # set mysql backend
 ARG DB=mysql
@@ -31,6 +31,7 @@ ENV DEBIAN_FRONTEND=noninteractive LANG=C.UTF-8 TZ=UTC TERM=xterm-256color
 RUN rustup set profile minimal
 
 ENV USER "root"
+ENV RUSTFLAGS='-C link-arg=-s'
 
 # Install MySQL package
 RUN apt-get update && apt-get install -y \
@@ -94,9 +95,9 @@ COPY Rocket.toml .
 COPY --from=vault /web-vault ./web-vault
 COPY --from=build /app/target/x86_64-unknown-linux-musl/release/bitwarden_rs .
 
-COPY docker/healthcheck.sh ./healthcheck.sh
+COPY docker/healthcheck.sh /healthcheck.sh
 
-HEALTHCHECK --interval=30s --timeout=3s CMD sh healthcheck.sh || exit 1
+HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]
 
 # Configures the startup!
 WORKDIR /

@@ -10,12 +10,12 @@
 # It can be viewed in multiple ways:
 # - From the https://hub.docker.com/repository/docker/bitwardenrs/web-vault/tags page, click the tag name and the digest should be there.
 # - From the console, with the following commands:
-# docker pull bitwardenrs/web-vault:v2.12.0e
+# docker pull bitwardenrs/web-vault:v2.13.2b
-# docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.12.0e
+# docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.13.2b
 #
 # - To do the opposite, and get the tag from the hash, you can do:
-# docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:ce56b3f5e538351411785ac45e9b4b913259c3508b1323d62e8fa0f30717dd1c
+# docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:f32c555a2bc3ee6bc0718319b1e8057c10ef889cf7231f0ff217af98486da554
-FROM bitwardenrs/web-vault@sha256:ce56b3f5e538351411785ac45e9b4b913259c3508b1323d62e8fa0f30717dd1c as vault
+FROM bitwardenrs/web-vault@sha256:f32c555a2bc3ee6bc0718319b1e8057c10ef889cf7231f0ff217af98486da554 as vault
 
 ########################## BUILD IMAGE ##########################
 # We need to use the Rust build image, because
@@ -92,9 +92,9 @@ COPY Rocket.toml .
 COPY --from=vault /web-vault ./web-vault
 COPY --from=build app/target/release/bitwarden_rs .
 
-COPY docker/healthcheck.sh ./healthcheck.sh
+COPY docker/healthcheck.sh /healthcheck.sh
 
-HEALTHCHECK --interval=30s --timeout=3s CMD sh healthcheck.sh || exit 1
+HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]
 
 # Configures the startup!
 WORKDIR /

@@ -10,16 +10,16 @@
 # It can be viewed in multiple ways:
 # - From the https://hub.docker.com/repository/docker/bitwardenrs/web-vault/tags page, click the tag name and the digest should be there.
 # - From the console, with the following commands:
-# docker pull bitwardenrs/web-vault:v2.12.0e
+# docker pull bitwardenrs/web-vault:v2.13.2b
-# docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.12.0e
+# docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.13.2b
 #
 # - To do the opposite, and get the tag from the hash, you can do:
-# docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:ce56b3f5e538351411785ac45e9b4b913259c3508b1323d62e8fa0f30717dd1c
+# docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:f32c555a2bc3ee6bc0718319b1e8057c10ef889cf7231f0ff217af98486da554
-FROM bitwardenrs/web-vault@sha256:ce56b3f5e538351411785ac45e9b4b913259c3508b1323d62e8fa0f30717dd1c as vault
+FROM bitwardenrs/web-vault@sha256:f32c555a2bc3ee6bc0718319b1e8057c10ef889cf7231f0ff217af98486da554 as vault
 
 ########################## BUILD IMAGE ##########################
 # Musl build image for statically compiled binary
-FROM clux/muslrust:nightly-2019-12-19 as build
+FROM clux/muslrust:nightly-2020-03-09 as build
 
 # set postgresql backend
 ARG DB=postgresql
@@ -31,6 +31,7 @@ ENV DEBIAN_FRONTEND=noninteractive LANG=C.UTF-8 TZ=UTC TERM=xterm-256color
 RUN rustup set profile minimal
 
 ENV USER "root"
+ENV RUSTFLAGS='-C link-arg=-s'
 
 # Install PostgreSQL package
 RUN apt-get update && apt-get install -y \
@@ -94,9 +95,9 @@ COPY Rocket.toml .
 COPY --from=vault /web-vault ./web-vault
 COPY --from=build /app/target/x86_64-unknown-linux-musl/release/bitwarden_rs .
 
-COPY docker/healthcheck.sh ./healthcheck.sh
+COPY docker/healthcheck.sh /healthcheck.sh
 
-HEALTHCHECK --interval=30s --timeout=3s CMD sh healthcheck.sh || exit 1
+HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]
 
 # Configures the startup!
 WORKDIR /

@@ -10,12 +10,12 @@
 # It can be viewed in multiple ways:
 # - From the https://hub.docker.com/repository/docker/bitwardenrs/web-vault/tags page, click the tag name and the digest should be there.
 # - From the console, with the following commands:
-# docker pull bitwardenrs/web-vault:v2.12.0e
+# docker pull bitwardenrs/web-vault:v2.13.2b
-# docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.12.0e
+# docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.13.2b
 #
 # - To do the opposite, and get the tag from the hash, you can do:
-# docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:ce56b3f5e538351411785ac45e9b4b913259c3508b1323d62e8fa0f30717dd1c
+# docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:f32c555a2bc3ee6bc0718319b1e8057c10ef889cf7231f0ff217af98486da554
-FROM bitwardenrs/web-vault@sha256:ce56b3f5e538351411785ac45e9b4b913259c3508b1323d62e8fa0f30717dd1c as vault
+FROM bitwardenrs/web-vault@sha256:f32c555a2bc3ee6bc0718319b1e8057c10ef889cf7231f0ff217af98486da554 as vault
 
 ########################## BUILD IMAGE ##########################
 # We need to use the Rust build image, because
@@ -86,9 +86,9 @@ COPY Rocket.toml .
 COPY --from=vault /web-vault ./web-vault
 COPY --from=build app/target/release/bitwarden_rs .
 
-COPY docker/healthcheck.sh ./healthcheck.sh
+COPY docker/healthcheck.sh /healthcheck.sh
 
-HEALTHCHECK --interval=30s --timeout=3s CMD sh healthcheck.sh || exit 1
+HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]
 
 # Configures the startup!
 WORKDIR /

@@ -10,16 +10,16 @@
 # It can be viewed in multiple ways:
 # - From the https://hub.docker.com/repository/docker/bitwardenrs/web-vault/tags page, click the tag name and the digest should be there.
 # - From the console, with the following commands:
-# docker pull bitwardenrs/web-vault:v2.12.0e
+# docker pull bitwardenrs/web-vault:v2.13.2b
-# docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.12.0e
+# docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.13.2b
 #
 # - To do the opposite, and get the tag from the hash, you can do:
-# docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:ce56b3f5e538351411785ac45e9b4b913259c3508b1323d62e8fa0f30717dd1c
+# docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:f32c555a2bc3ee6bc0718319b1e8057c10ef889cf7231f0ff217af98486da554
-FROM bitwardenrs/web-vault@sha256:ce56b3f5e538351411785ac45e9b4b913259c3508b1323d62e8fa0f30717dd1c as vault
+FROM bitwardenrs/web-vault@sha256:f32c555a2bc3ee6bc0718319b1e8057c10ef889cf7231f0ff217af98486da554 as vault
 
 ########################## BUILD IMAGE ##########################
 # Musl build image for statically compiled binary
-FROM clux/muslrust:nightly-2019-12-19 as build
+FROM clux/muslrust:nightly-2020-03-09 as build
 
 # set sqlite as default for DB ARG for backward compatibility
 ARG DB=sqlite
@@ -31,6 +31,7 @@ ENV DEBIAN_FRONTEND=noninteractive LANG=C.UTF-8 TZ=UTC TERM=xterm-256color
 RUN rustup set profile minimal
 
 ENV USER "root"
+ENV RUSTFLAGS='-C link-arg=-s'
 
 # Creates a dummy project used to grab dependencies
 RUN USER=root cargo new --bin /app
@@ -88,9 +89,9 @@ COPY Rocket.toml .
 COPY --from=vault /web-vault ./web-vault
 COPY --from=build /app/target/x86_64-unknown-linux-musl/release/bitwarden_rs .
 
-COPY docker/healthcheck.sh ./healthcheck.sh
+COPY docker/healthcheck.sh /healthcheck.sh
 
-HEALTHCHECK --interval=30s --timeout=3s CMD sh healthcheck.sh || exit 1
+HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]
 
 # Configures the startup!
 WORKDIR /

@@ -10,12 +10,12 @@
 # It can be viewed in multiple ways:
 # - From the https://hub.docker.com/repository/docker/bitwardenrs/web-vault/tags page, click the tag name and the digest should be there.
 # - From the console, with the following commands:
-# docker pull bitwardenrs/web-vault:v2.12.0e
+# docker pull bitwardenrs/web-vault:v2.13.2b
-# docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.12.0e
+# docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.13.2b
 #
 # - To do the opposite, and get the tag from the hash, you can do:
-# docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:ce56b3f5e538351411785ac45e9b4b913259c3508b1323d62e8fa0f30717dd1c
+# docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:f32c555a2bc3ee6bc0718319b1e8057c10ef889cf7231f0ff217af98486da554
-FROM bitwardenrs/web-vault@sha256:ce56b3f5e538351411785ac45e9b4b913259c3508b1323d62e8fa0f30717dd1c as vault
+FROM bitwardenrs/web-vault@sha256:f32c555a2bc3ee6bc0718319b1e8057c10ef889cf7231f0ff217af98486da554 as vault
 
 ########################## BUILD IMAGE ##########################
 # We need to use the Rust build image, because
@@ -124,9 +124,9 @@ COPY Rocket.toml .
 COPY --from=vault /web-vault ./web-vault
 COPY --from=build /app/target/arm-unknown-linux-gnueabi/release/bitwarden_rs .
 
-COPY docker/healthcheck.sh ./healthcheck.sh
+COPY docker/healthcheck.sh /healthcheck.sh
 
-HEALTHCHECK --interval=30s --timeout=3s CMD sh healthcheck.sh || exit 1
+HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]
 
 # Configures the startup!
 WORKDIR /

@@ -10,12 +10,12 @@
 # It can be viewed in multiple ways:
 # - From the https://hub.docker.com/repository/docker/bitwardenrs/web-vault/tags page, click the tag name and the digest should be there.
 # - From the console, with the following commands:
-# docker pull bitwardenrs/web-vault:v2.12.0e
+# docker pull bitwardenrs/web-vault:v2.13.2b
-# docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.12.0e
+# docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.13.2b
 #
 # - To do the opposite, and get the tag from the hash, you can do:
-# docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:ce56b3f5e538351411785ac45e9b4b913259c3508b1323d62e8fa0f30717dd1c
+# docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:f32c555a2bc3ee6bc0718319b1e8057c10ef889cf7231f0ff217af98486da554
-FROM bitwardenrs/web-vault@sha256:ce56b3f5e538351411785ac45e9b4b913259c3508b1323d62e8fa0f30717dd1c as vault
+FROM bitwardenrs/web-vault@sha256:f32c555a2bc3ee6bc0718319b1e8057c10ef889cf7231f0ff217af98486da554 as vault
 
 ########################## BUILD IMAGE ##########################
 # We need to use the Rust build image, because
@@ -118,9 +118,9 @@ COPY Rocket.toml .
 COPY --from=vault /web-vault ./web-vault
 COPY --from=build /app/target/arm-unknown-linux-gnueabi/release/bitwarden_rs .
 
-COPY docker/healthcheck.sh ./healthcheck.sh
+COPY docker/healthcheck.sh /healthcheck.sh
 
-HEALTHCHECK --interval=30s --timeout=3s CMD sh healthcheck.sh || exit 1
+HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]
 
 # Configures the startup!
 WORKDIR /

@@ -10,12 +10,12 @@
 # It can be viewed in multiple ways:
 # - From the https://hub.docker.com/repository/docker/bitwardenrs/web-vault/tags page, click the tag name and the digest should be there.
 # - From the console, with the following commands:
-# docker pull bitwardenrs/web-vault:v2.12.0e
+# docker pull bitwardenrs/web-vault:v2.13.2b
-# docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.12.0e
+# docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.13.2b
 #
 # - To do the opposite, and get the tag from the hash, you can do:
-# docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:ce56b3f5e538351411785ac45e9b4b913259c3508b1323d62e8fa0f30717dd1c
+# docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:f32c555a2bc3ee6bc0718319b1e8057c10ef889cf7231f0ff217af98486da554
-FROM bitwardenrs/web-vault@sha256:ce56b3f5e538351411785ac45e9b4b913259c3508b1323d62e8fa0f30717dd1c as vault
+FROM bitwardenrs/web-vault@sha256:f32c555a2bc3ee6bc0718319b1e8057c10ef889cf7231f0ff217af98486da554 as vault
 
 ########################## BUILD IMAGE ##########################
 # We need to use the Rust build image, because
@@ -123,9 +123,9 @@ COPY Rocket.toml .
 COPY --from=vault /web-vault ./web-vault
 COPY --from=build /app/target/armv7-unknown-linux-gnueabihf/release/bitwarden_rs .
 
-COPY docker/healthcheck.sh ./healthcheck.sh
+COPY docker/healthcheck.sh /healthcheck.sh
 
-HEALTHCHECK --interval=30s --timeout=3s CMD sh healthcheck.sh || exit 1
+HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]
 
 # Configures the startup!
 WORKDIR /

@@ -10,12 +10,12 @@
 # It can be viewed in multiple ways:
 # - From the https://hub.docker.com/repository/docker/bitwardenrs/web-vault/tags page, click the tag name and the digest should be there.
 # - From the console, with the following commands:
-# docker pull bitwardenrs/web-vault:v2.12.0e
+# docker pull bitwardenrs/web-vault:v2.13.2b
-# docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.12.0e
+# docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.13.2b
 #
 # - To do the opposite, and get the tag from the hash, you can do:
-# docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:ce56b3f5e538351411785ac45e9b4b913259c3508b1323d62e8fa0f30717dd1c
+# docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:f32c555a2bc3ee6bc0718319b1e8057c10ef889cf7231f0ff217af98486da554
-FROM bitwardenrs/web-vault@sha256:ce56b3f5e538351411785ac45e9b4b913259c3508b1323d62e8fa0f30717dd1c as vault
+FROM bitwardenrs/web-vault@sha256:f32c555a2bc3ee6bc0718319b1e8057c10ef889cf7231f0ff217af98486da554 as vault
 
 ########################## BUILD IMAGE ##########################
 # We need to use the Rust build image, because
@@ -117,9 +117,9 @@ COPY Rocket.toml .
 COPY --from=vault /web-vault ./web-vault
 COPY --from=build /app/target/armv7-unknown-linux-gnueabihf/release/bitwarden_rs .
 
-COPY docker/healthcheck.sh ./healthcheck.sh
+COPY docker/healthcheck.sh /healthcheck.sh
 
-HEALTHCHECK --interval=30s --timeout=3s CMD sh healthcheck.sh || exit 1
+HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]
 
 # Configures the startup!
 WORKDIR /

docker/healthcheck.sh: 59 changes (normal file → executable file)
@@ -1,8 +1,53 @@
-#!/usr/bin/env sh
-
-if [ -z "$ROCKET_TLS"]
-then
-curl --fail http://localhost:${ROCKET_PORT:-"80"}/alive || exit 1
-else
-curl --insecure --fail https://localhost:${ROCKET_PORT:-"80"}/alive || exit 1
-fi
+#!/bin/sh
+
+# Use the value of the corresponding env var (if present),
+# or a default value otherwise.
+: ${DATA_FOLDER:="data"}
+: ${ROCKET_PORT:="80"}
+
+CONFIG_FILE="${DATA_FOLDER}"/config.json
+
+# Given a config key, return the corresponding config value from the
+# config file. If the key doesn't exist, return an empty string.
+get_config_val() {
+  local key="$1"
+  # Extract a line of the form:
+  # "domain": "https://bw.example.com/path",
+  grep "\"${key}\":" "${CONFIG_FILE}" |
+  # To extract just the value (https://bw.example.com/path), delete:
+  # (1) everything up to and including the first ':',
+  # (2) whitespace and '"' from the front,
+  # (3) ',' and '"' from the back.
+  sed -e 's/[^:]\+://' -e 's/^[ "]\+//' -e 's/[,"]\+$//'
+}
+
+# Extract the base path from a domain URL. For example:
+# - `` -> ``
+# - `https://bw.example.com` -> ``
+# - `https://bw.example.com/` -> ``
+# - `https://bw.example.com/path` -> `/path`
+# - `https://bw.example.com/multi/path` -> `/multi/path`
+get_base_path() {
+  echo "$1" |
+  # Delete:
+  # (1) everything up to and including '://',
+  # (2) everything up to '/',
+  # (3) trailing '/' from the back.
+  sed -e 's|.*://||' -e 's|[^/]\+||' -e 's|/*$||'
+}
+
+# Read domain URL from config.json, if present.
+if [ -r "${CONFIG_FILE}" ]; then
+  domain="$(get_config_val 'domain')"
+  if [ -n "${domain}" ]; then
+    # config.json 'domain' overrides the DOMAIN env var.
+    DOMAIN="${domain}"
+  fi
+fi
+
+base_path="$(get_base_path "${DOMAIN}")"
+if [ -n "${ROCKET_TLS}" ]; then
+  s='s'
+fi
+curl --insecure --fail --silent --show-error \
+     "http${s}://localhost:${ROCKET_PORT}${base_path}/alive" || exit 1

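The rewritten healthcheck derives the scheme from ROCKET_TLS and the base path from the configured domain, and the Dockerfiles above now invoke it as /healthcheck.sh. A hedged sketch for exercising it by hand (the container name "bitwarden" is a placeholder):

  # Report the health status Docker derives from the HEALTHCHECK instruction:
  docker inspect --format '{{.State.Health.Status}}' bitwarden

  # Or run the script directly inside the container; it exits non-zero when /alive is unreachable:
  docker exec bitwarden /healthcheck.sh && echo healthy
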
@@ -0,0 +1 @@
+DROP TABLE org_policies;

@@ -0,0 +1,9 @@
+CREATE TABLE org_policies (
+  uuid CHAR(36) NOT NULL PRIMARY KEY,
+  org_uuid CHAR(36) NOT NULL REFERENCES organizations (uuid),
+  atype INTEGER NOT NULL,
+  enabled BOOLEAN NOT NULL,
+  data TEXT NOT NULL,
+
+  UNIQUE (org_uuid, atype)
+);

@@ -0,0 +1 @@
+DROP TABLE org_policies;

@@ -0,0 +1,9 @@
+CREATE TABLE org_policies (
+  uuid CHAR(36) NOT NULL PRIMARY KEY,
+  org_uuid CHAR(36) NOT NULL REFERENCES organizations (uuid),
+  atype INTEGER NOT NULL,
+  enabled BOOLEAN NOT NULL,
+  data TEXT NOT NULL,
+
+  UNIQUE (org_uuid, atype)
+);

@@ -0,0 +1 @@
+DROP TABLE org_policies;

@@ -0,0 +1,9 @@
+CREATE TABLE org_policies (
+  uuid TEXT NOT NULL PRIMARY KEY,
+  org_uuid TEXT NOT NULL REFERENCES organizations (uuid),
+  atype INTEGER NOT NULL,
+  enabled BOOLEAN NOT NULL,
+  data TEXT NOT NULL,
+
+  UNIQUE (org_uuid, atype)
+);

@@ -1 +1 @@
-nightly-2020-03-09
+nightly-2020-04-08

@@ -17,7 +17,7 @@ use crate::mail;
 use crate::CONFIG;
 
 pub fn routes() -> Vec<Route> {
-    if CONFIG.admin_token().is_none() && !CONFIG.disable_admin_token() {
+    if !CONFIG.disable_admin_token() && !CONFIG.is_admin_token_set() {
         return routes![admin_disabled];
     }
 
@@ -51,7 +51,7 @@ const COOKIE_NAME: &str = "BWRS_ADMIN";
 const ADMIN_PATH: &str = "/admin";
 
 const BASE_TEMPLATE: &str = "admin/base";
-const VERSION: Option<&str> = option_env!("GIT_VERSION");
+const VERSION: Option<&str> = option_env!("BWRS_VERSION");
 
 fn admin_path() -> String {
     format!("{}{}", CONFIG.domain_path(), ADMIN_PATH)
@@ -91,7 +91,7 @@ fn post_admin_login(data: Form<LoginForm>, mut cookies: Cookies, ip: ClientIp) -
 
     let cookie = Cookie::build(COOKIE_NAME, jwt)
         .path(admin_path())
-        .max_age(chrono::Duration::minutes(20))
+        .max_age(time::Duration::minutes(20))
         .same_site(SameSite::Strict)
         .http_only(true)
         .finish();

@@ -79,6 +79,9 @@ fn sync(data: Form<SyncData>, headers: Headers, conn: DbConn) -> JsonResult {
     let collections = Collection::find_by_user_uuid(&headers.user.uuid, &conn);
     let collections_json: Vec<Value> = collections.iter().map(Collection::to_json).collect();
 
+    let policies = OrgPolicy::find_by_user(&headers.user.uuid, &conn);
+    let policies_json: Vec<Value> = policies.iter().map(OrgPolicy::to_json).collect();
+
     let ciphers = Cipher::find_by_user(&headers.user.uuid, &conn);
     let ciphers_json: Vec<Value> = ciphers
         .iter()
@@ -95,6 +98,7 @@ fn sync(data: Form<SyncData>, headers: Headers, conn: DbConn) -> JsonResult {
         "Profile": user_json,
         "Folders": folders_json,
         "Collections": collections_json,
+        "Policies": policies_json,
         "Ciphers": ciphers_json,
         "Domains": domains_json,
         "Object": "sync"
@@ -599,10 +603,14 @@ fn share_cipher_by_uuid(
         None => err!("Cipher doesn't exist"),
     };
 
+    let mut shared_to_collection = false;
+
     match data.Cipher.OrganizationId.clone() {
-        None => err!("Organization id not provided"),
+        // If we don't get an organization ID, we don't do anything
+        // No error because this is used when using the Clone functionality
+        None => {},
         Some(organization_uuid) => {
-            let mut shared_to_collection = false;
             for uuid in &data.CollectionIds {
                 match Collection::find_by_uuid_and_org(uuid, &organization_uuid, &conn) {
                     None => err!("Invalid collection ID provided"),
@@ -616,19 +624,20 @@ fn share_cipher_by_uuid(
                 }
             }
         }
-            update_cipher_from_data(
-                &mut cipher,
-                data.Cipher,
-                &headers,
-                shared_to_collection,
-                &conn,
-                &nt,
-                UpdateType::CipherUpdate,
-            )?;
 
-            Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, &conn)))
         }
-    }
+    };
 
+    update_cipher_from_data(
+        &mut cipher,
+        data.Cipher,
+        &headers,
+        shared_to_collection,
+        &conn,
+        &nt,
+        UpdateType::CipherUpdate,
+    )?;
+
+    Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, &conn)))
 }
 
 #[post("/ciphers/<uuid>/attachment", format = "multipart/form-data", data = "<data>")]
@@ -648,7 +657,7 @@ fn post_attachment(
     if !cipher.is_write_accessible_to_user(&headers.user.uuid, &conn) {
         err_discard!("Cipher is not write accessible", data)
     }
 
     let mut params = content_type.params();
     let boundary_pair = params.next().expect("No boundary provided");
     let boundary = boundary_pair.1;
@@ -656,8 +665,8 @@ fn post_attachment(
     let size_limit = if let Some(ref user_uuid) = cipher.user_uuid {
         match CONFIG.user_attachment_limit() {
             Some(0) => err_discard!("Attachments are disabled", data),
-            Some(limit) => {
+            Some(limit_kb) => {
-                let left = limit - Attachment::size_by_user(user_uuid, &conn);
+                let left = (limit_kb * 1024) - Attachment::size_by_user(user_uuid, &conn);
                 if left <= 0 {
                     err_discard!("Attachment size limit reached! Delete some files to open space", data)
                 }
@@ -668,8 +677,8 @@ fn post_attachment(
     } else if let Some(ref org_uuid) = cipher.organization_uuid {
         match CONFIG.org_attachment_limit() {
             Some(0) => err_discard!("Attachments are disabled", data),
-            Some(limit) => {
+            Some(limit_kb) => {
-                let left = limit - Attachment::size_by_org(org_uuid, &conn);
+                let left = (limit_kb * 1024) - Attachment::size_by_org(org_uuid, &conn);
                 if left <= 0 {
                     err_discard!("Attachment size limit reached! Delete some files to open space", data)
                 }

@@ -146,10 +146,10 @@ fn hibp_breach(username: String) -> JsonResult {
         username
     );
 
-    use reqwest::{header::USER_AGENT, Client};
+    use reqwest::{header::USER_AGENT, blocking::Client};
 
     if let Some(api_key) = crate::CONFIG.hibp_api_key() {
-        let hibp_client = Client::builder().use_sys_proxy().build()?;
+        let hibp_client = Client::builder().build()?;
 
         let res = hibp_client
             .get(&url)

@@ -2,6 +2,7 @@ use rocket::request::Form;
|
|||||||
use rocket::Route;
|
use rocket::Route;
|
||||||
use rocket_contrib::json::Json;
|
use rocket_contrib::json::Json;
|
||||||
use serde_json::Value;
|
use serde_json::Value;
|
||||||
|
use num_traits::FromPrimitive;
|
||||||
|
|
||||||
use crate::api::{
|
use crate::api::{
|
||||||
EmptyResult, JsonResult, JsonUpcase, JsonUpcaseVec, Notify, NumberOrString, PasswordData, UpdateType,
|
EmptyResult, JsonResult, JsonUpcase, JsonUpcaseVec, Notify, NumberOrString, PasswordData, UpdateType,
|
||||||
@@ -45,6 +46,10 @@ pub fn routes() -> Vec<Route> {
|
|||||||
delete_user,
|
delete_user,
|
||||||
post_delete_user,
|
post_delete_user,
|
||||||
post_org_import,
|
post_org_import,
|
||||||
|
list_policies,
|
||||||
|
list_policies_token,
|
||||||
|
get_policy,
|
||||||
|
put_policy,
|
||||||
]
|
]
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -830,22 +835,13 @@ struct RelationsData {
|
|||||||
fn post_org_import(
|
fn post_org_import(
|
||||||
query: Form<OrgIdData>,
|
query: Form<OrgIdData>,
|
||||||
data: JsonUpcase<ImportData>,
|
data: JsonUpcase<ImportData>,
|
||||||
headers: Headers,
|
headers: AdminHeaders,
|
||||||
conn: DbConn,
|
conn: DbConn,
|
||||||
nt: Notify,
|
nt: Notify,
|
||||||
) -> EmptyResult {
|
) -> EmptyResult {
|
||||||
let data: ImportData = data.into_inner().data;
|
let data: ImportData = data.into_inner().data;
|
||||||
let org_id = query.into_inner().organization_id;
|
let org_id = query.into_inner().organization_id;
|
||||||
|
|
||||||
let org_user = match UserOrganization::find_by_user_and_org(&headers.user.uuid, &org_id, &conn) {
|
|
||||||
Some(user) => user,
|
|
||||||
None => err!("User is not part of the organization"),
|
|
||||||
};
|
|
||||||
|
|
||||||
if org_user.atype < UserOrgType::Admin {
|
|
||||||
err!("Only admins or owners can import into an organization")
|
|
||||||
}
|
|
||||||
|
|
||||||
// Read and create the collections
|
// Read and create the collections
|
||||||
let collections: Vec<_> = data
|
let collections: Vec<_> = data
|
||||||
.Collections
|
.Collections
|
||||||
@@ -866,6 +862,8 @@ fn post_org_import(
         relations.push((relation.Key, relation.Value));
     }

+    let headers: Headers = headers.into();
+
     // Read and create the ciphers
     let ciphers: Vec<_> = data
         .Ciphers
@@ -901,3 +899,83 @@ fn post_org_import(
     let mut user = headers.user;
     user.update_revision(&conn)
 }
+
+#[get("/organizations/<org_id>/policies")]
+fn list_policies(org_id: String, _headers: AdminHeaders, conn: DbConn) -> JsonResult {
+    let policies = OrgPolicy::find_by_org(&org_id, &conn);
+    let policies_json: Vec<Value> = policies.iter().map(OrgPolicy::to_json).collect();
+
+    Ok(Json(json!({
+        "Data": policies_json,
+        "Object": "list",
+        "ContinuationToken": null
+    })))
+}
+
+#[get("/organizations/<org_id>/policies/token?<token>")]
+fn list_policies_token(org_id: String, token: String, conn: DbConn) -> JsonResult {
+    let invite = crate::auth::decode_invite(&token)?;
+
+    let invite_org_id = match invite.org_id {
+        Some(invite_org_id) => invite_org_id,
+        None => err!("Invalid token"),
+    };
+
+    if invite_org_id != org_id {
+        err!("Token doesn't match request organization");
+    }
+
+    // TODO: We receive the invite token as ?token=<>, validate it contains the org id
+    let policies = OrgPolicy::find_by_org(&org_id, &conn);
+    let policies_json: Vec<Value> = policies.iter().map(OrgPolicy::to_json).collect();
+
+    Ok(Json(json!({
+        "Data": policies_json,
+        "Object": "list",
+        "ContinuationToken": null
+    })))
+}
+
+#[get("/organizations/<org_id>/policies/<pol_type>")]
+fn get_policy(org_id: String, pol_type: i32, _headers: AdminHeaders, conn: DbConn) -> JsonResult {
+    let pol_type_enum = match OrgPolicyType::from_i32(pol_type) {
+        Some(pt) => pt,
+        None => err!("Invalid policy type"),
+    };
+
+    let policy = match OrgPolicy::find_by_org_and_type(&org_id, pol_type, &conn) {
+        Some(p) => p,
+        None => OrgPolicy::new(org_id, pol_type_enum, "{}".to_string()),
+    };
+
+    Ok(Json(policy.to_json()))
+}
+
+#[derive(Deserialize)]
+struct PolicyData {
+    enabled: bool,
+    #[serde(rename = "type")]
+    _type: i32,
+    data: Value,
+}
+
+#[put("/organizations/<org_id>/policies/<pol_type>", data = "<data>")]
+fn put_policy(org_id: String, pol_type: i32, data: Json<PolicyData>, _headers: AdminHeaders, conn: DbConn) -> JsonResult {
+    let data: PolicyData = data.into_inner();
+
+    let pol_type_enum = match OrgPolicyType::from_i32(pol_type) {
+        Some(pt) => pt,
+        None => err!("Invalid policy type"),
+    };
+
+    let mut policy = match OrgPolicy::find_by_org_and_type(&org_id, pol_type, &conn) {
+        Some(p) => p,
+        None => OrgPolicy::new(org_id, pol_type_enum, "{}".to_string()),
+    };
+
+    policy.enabled = data.enabled;
+    policy.data = serde_json::to_string(&data.data)?;
+    policy.save(&conn)?;
+
+    Ok(Json(policy.to_json()))
+}
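Note: for reference, the body accepted by the new `put_policy` handler deserializes into `PolicyData` above (lowercase keys, with `type` mapped to `_type`). A plausible payload, with illustrative values only:

// Sketch: the JSON a client would PUT to /organizations/<org_id>/policies/0
let body = serde_json::json!({
    "type": 0,        // OrgPolicyType::TwoFactorAuthentication
    "enabled": true,
    "data": null      // free-form policy data; stored as a JSON string by the handler
});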
@@ -21,9 +21,9 @@ pub fn routes() -> Vec<Route> {

 #[derive(Serialize, Deserialize)]
 struct DuoData {
-    host: String,
-    ik: String,
-    sk: String,
+    host: String, // Duo API hostname
+    ik: String,   // integration key
+    sk: String,   // secret key
 }

 impl DuoData {
@@ -187,9 +187,10 @@ fn activate_duo_put(data: JsonUpcase<EnableDuoData>, headers: Headers, conn: DbC
 fn duo_api_request(method: &str, path: &str, params: &str, data: &DuoData) -> EmptyResult {
     const AGENT: &str = "bitwarden_rs:Duo/1.0 (Rust)";

-    use reqwest::{header::*, Client, Method};
+    use reqwest::{header::*, Method, blocking::Client};
     use std::str::FromStr;

+    // https://duo.com/docs/authapi#api-details
     let url = format!("https://{}{}", &data.host, path);
     let date = Utc::now().to_rfc2822();
     let username = &data.ik;
@@ -268,6 +269,10 @@ fn sign_duo_values(key: &str, email: &str, ikey: &str, prefix: &str, expire: i64
 }

 pub fn validate_duo_login(email: &str, response: &str, conn: &DbConn) -> EmptyResult {
+    // email is as entered by the user, so it needs to be normalized before
+    // comparison with auth_user below.
+    let email = &email.to_lowercase();
+
     let split: Vec<&str> = response.split(':').collect();
     if split.len() != 2 {
         err!("Invalid response length");
@@ -8,7 +8,7 @@ use rocket::http::ContentType;
 use rocket::response::Content;
 use rocket::Route;

-use reqwest::{header::HeaderMap, Client, Response, Url};
+use reqwest::{Url, header::HeaderMap, blocking::Client, blocking::Response};

 use rocket::http::Cookie;

@@ -30,8 +30,6 @@ const ALLOWED_CHARS: &str = "_-.";
 static CLIENT: Lazy<Client> = Lazy::new(|| {
     // Reuse the client between requests
     Client::builder()
-        .use_sys_proxy()
-        .gzip(true)
         .timeout(Duration::from_secs(CONFIG.icon_download_timeout()))
         .default_headers(_header_map())
         .build()
@@ -347,6 +347,7 @@ fn _json_err_twofactor(providers: &[i32], user_uuid: &str, conn: &DbConn) -> Api
     Ok(result)
 }

+// https://github.com/bitwarden/mobile/blob/master/src/Core/Models/Request/TokenRequest.cs
 #[derive(Debug, Clone, Default)]
 #[allow(non_snake_case)]
 struct ConnectData {
@@ -364,6 +365,7 @@ struct ConnectData {
     device_identifier: Option<String>,
     device_name: Option<String>,
     device_type: Option<String>,
+    device_push_token: Option<String>, // Unused; mobile device push not yet supported.

     // Needed for two-factor auth
     two_factor_provider: Option<i32>,
@@ -391,6 +393,7 @@ impl<'f> FromForm<'f> for ConnectData {
             "deviceidentifier" => form.device_identifier = Some(value),
             "devicename" => form.device_name = Some(value),
             "devicetype" => form.device_type = Some(value),
+            "devicepushtoken" => form.device_push_token = Some(value),
             "twofactorprovider" => form.two_factor_provider = value.parse().ok(),
             "twofactortoken" => form.two_factor_token = Some(value),
             "twofactorremember" => form.two_factor_remember = value.parse().ok(),
@@ -152,6 +152,9 @@ impl WSHandler {
 impl Handler for WSHandler {
     fn on_open(&mut self, hs: Handshake) -> ws::Result<()> {
         // Path == "/notifications/hub?id=<id>==&access_token=<access_token>"
+        //
+        // We don't use `id`, and as of around 2020-03-25, the official clients
+        // no longer seem to pass `id` (only `access_token`).
         let path = hs.request.resource();

         let (_id, access_token) = match path.split('?').nth(1) {
@@ -170,10 +173,11 @@ impl Handler for WSHandler {

                 match (id, access_token) {
                     (Some(a), Some(b)) => (a, b),
-                    _ => return self.err("Missing id or access token"),
+                    (None, Some(b)) => ("", b), // Ignore missing `id`.
+                    _ => return self.err("Missing access token"),
                 }
             }
-            None => return self.err("Missing query path"),
+            None => return self.err("Missing query parameters"),
         };

         // Validate the user
src/auth.rs (43 lines changed)
@@ -4,8 +4,9 @@
 use crate::util::read_file;
 use chrono::{Duration, Utc};
 use once_cell::sync::Lazy;
+use num_traits::FromPrimitive;

-use jsonwebtoken::{self, Algorithm, Header};
+use jsonwebtoken::{self, Algorithm, Header, EncodingKey, DecodingKey};
 use serde::de::DeserializeOwned;
 use serde::ser::Serialize;

@@ -31,7 +32,7 @@ static PUBLIC_RSA_KEY: Lazy<Vec<u8>> = Lazy::new(|| match read_file(&CONFIG.publ
 });

 pub fn encode_jwt<T: Serialize>(claims: &T) -> String {
-    match jsonwebtoken::encode(&JWT_HEADER, claims, &PRIVATE_RSA_KEY) {
+    match jsonwebtoken::encode(&JWT_HEADER, claims, &EncodingKey::from_rsa_der(&PRIVATE_RSA_KEY)) {
         Ok(token) => token,
         Err(e) => panic!("Error encoding jwt {}", e),
     }
@@ -50,7 +51,7 @@ fn decode_jwt<T: DeserializeOwned>(token: &str, issuer: String) -> Result<T, Err

     let token = token.replace(char::is_whitespace, "");

-    jsonwebtoken::decode(&token, &PUBLIC_RSA_KEY, &validation)
+    jsonwebtoken::decode(&token, &DecodingKey::from_rsa_der(&PUBLIC_RSA_KEY), &validation)
         .map(|d| d.claims)
         .map_res("Error decoding JWT")
 }
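Note: newer jsonwebtoken releases (7.x at the time, assumed here) no longer accept raw key bytes; RSA keys have to be wrapped in `EncodingKey`/`DecodingKey`, as the two hunks above show. A minimal round-trip sketch under that assumption, with the DER key bytes coming from files read the same way as above:

use jsonwebtoken::{decode, encode, Algorithm, DecodingKey, EncodingKey, Header, Validation};
use serde::{Deserialize, Serialize};

#[derive(Serialize, Deserialize)]
struct Claims {
    sub: String,
    exp: i64, // expiry is checked by default during validation
}

fn roundtrip(private_der: &[u8], public_der: &[u8], claims: &Claims) -> jsonwebtoken::errors::Result<Claims> {
    let token = encode(&Header::new(Algorithm::RS256), claims, &EncodingKey::from_rsa_der(private_der))?;
    let data = decode::<Claims>(&token, &DecodingKey::from_rsa_der(public_der), &Validation::new(Algorithm::RS256))?;
    Ok(data.claims)
}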
@@ -306,6 +307,25 @@ pub struct OrgHeaders {
     pub org_user_type: UserOrgType,
 }

+// org_id is usually the second param ("/organizations/<org_id>")
+// But there are cases where it is located in a query value.
+// First check the param, if this is not a valid uuid, we will try the query value.
+fn get_org_id(request: &Request) -> Option<String> {
+    if let Some(Ok(org_id)) = request.get_param::<String>(1) {
+        if uuid::Uuid::parse_str(&org_id).is_ok() {
+            return Some(org_id);
+        }
+    }
+
+    if let Some(Ok(org_id)) = request.get_query_value::<String>("organizationId") {
+        if uuid::Uuid::parse_str(&org_id).is_ok() {
+            return Some(org_id);
+        }
+    }
+
+    None
+}
+
 impl<'a, 'r> FromRequest<'a, 'r> for OrgHeaders {
     type Error = &'static str;

@@ -314,9 +334,8 @@ impl<'a, 'r> FromRequest<'a, 'r> for OrgHeaders {
             Outcome::Forward(_) => Outcome::Forward(()),
             Outcome::Failure(f) => Outcome::Failure(f),
             Outcome::Success(headers) => {
-                // org_id is expected to be the second param ("/organizations/<org_id>")
-                match request.get_param::<String>(1) {
-                    Some(Ok(org_id)) => {
+                match get_org_id(request) {
+                    Some(org_id) => {
                         let conn = match request.guard::<DbConn>() {
                             Outcome::Success(conn) => conn,
                             _ => err_handler!("Error getting DB"),
@@ -347,7 +366,7 @@ impl<'a, 'r> FromRequest<'a, 'r> for OrgHeaders {
                         }
                     },
                 })
-            }
+            },
             _ => err_handler!("Error getting the organization id"),
         }
     }
@@ -385,6 +404,16 @@ impl<'a, 'r> FromRequest<'a, 'r> for AdminHeaders {
     }
 }

+impl Into<Headers> for AdminHeaders {
+    fn into(self) -> Headers {
+        Headers {
+            host: self.host,
+            device: self.device,
+            user: self.user
+        }
+    }
+}
+
 pub struct OwnerHeaders {
     pub host: String,
     pub device: Device,
@@ -430,7 +430,8 @@ fn validate_config(cfg: &ConfigItems) -> Result<(), Error> {

     if let Some(ref token) = cfg.admin_token {
         if token.trim().is_empty() && !cfg.disable_admin_token {
-            err!("`ADMIN_TOKEN` is enabled but has an empty value. To enable the admin page without token, use `DISABLE_ADMIN_TOKEN`")
+            println!("[WARNING] `ADMIN_TOKEN` is enabled but has an empty value, so the admin page will be disabled.");
+            println!("[WARNING] To enable the admin page without a token, use `DISABLE_ADMIN_TOKEN`.");
         }
     }

@@ -617,6 +618,13 @@ impl Config {
         }
     }

+    /// Tests whether the admin token is set to a non-empty value.
+    pub fn is_admin_token_set(&self) -> bool {
+        let token = self.admin_token();
+
+        !token.is_none() && !token.unwrap().trim().is_empty()
+    }
+
     pub fn render_template<T: serde::ser::Serialize>(
         &self,
         name: &str,
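Note: the same check can be written without `unwrap()`; a hedged alternative sketch, not what the patch itself uses:

pub fn is_admin_token_set(&self) -> bool {
    // Equivalent: false when unset, false when blank, true otherwise.
    self.admin_token().map_or(false, |t| !t.trim().is_empty())
}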
@@ -6,7 +6,7 @@ use crate::error::Error;
 use ring::{digest, hmac, pbkdf2};
 use std::num::NonZeroU32;

-static DIGEST_ALG: &digest::Algorithm = &digest::SHA256;
+static DIGEST_ALG: pbkdf2::Algorithm = pbkdf2::PBKDF2_HMAC_SHA256;
 const OUTPUT_LEN: usize = digest::SHA256_OUTPUT_LEN;

 pub fn hash_password(secret: &[u8], salt: &[u8], iterations: u32) -> Vec<u8> {
@@ -29,7 +29,7 @@ pub fn verify_password_hash(secret: &[u8], salt: &[u8], previous: &[u8], iterati
 pub fn hmac_sign(key: &str, data: &str) -> String {
     use data_encoding::HEXLOWER;

-    let key = hmac::SigningKey::new(&digest::SHA1, key.as_bytes());
+    let key = hmac::Key::new(hmac::HMAC_SHA1_FOR_LEGACY_USE_ONLY, key.as_bytes());
     let signature = hmac::sign(&key, data.as_bytes());

     HEXLOWER.encode(signature.as_ref())
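Note: ring 0.16 selects PBKDF2 and HMAC algorithms differently than 0.14: PBKDF2 takes a `pbkdf2::Algorithm` value and HMAC keys are built from `hmac::HMAC_*` constants, as the two hunks above show. A minimal derive/verify sketch with those constants, assuming the same output length as above:

use ring::{digest, pbkdf2};
use std::num::NonZeroU32;

const OUTPUT_LEN: usize = digest::SHA256_OUTPUT_LEN;

fn derive(secret: &[u8], salt: &[u8], iterations: u32) -> Vec<u8> {
    let iterations = NonZeroU32::new(iterations).expect("iterations must be non-zero");
    let mut out = vec![0u8; OUTPUT_LEN];
    // Fills `out` with the PBKDF2-HMAC-SHA256 derivation of `secret`.
    pbkdf2::derive(pbkdf2::PBKDF2_HMAC_SHA256, iterations, salt, secret, &mut out);
    out
}

fn verify(secret: &[u8], salt: &[u8], previous: &[u8], iterations: u32) -> bool {
    let iterations = NonZeroU32::new(iterations).expect("iterations must be non-zero");
    // Constant-time comparison against a previously derived hash.
    pbkdf2::verify(pbkdf2::PBKDF2_HMAC_SHA256, iterations, salt, secret, previous).is_ok()
}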
@@ -76,7 +76,8 @@ impl<'a, 'r> FromRequest<'a, 'r> for DbConn {
     type Error = ();

     fn from_request(request: &'a Request<'r>) -> request::Outcome<DbConn, ()> {
-        let pool = request.guard::<State<Pool>>()?;
+        // https://github.com/SergioBenitez/Rocket/commit/e3c1a4ad3ab9b840482ec6de4200d30df43e357c
+        let pool = try_outcome!(request.guard::<State<Pool>>());
         match pool.get() {
             Ok(conn) => Outcome::Success(DbConn(conn)),
             Err(_) => Outcome::Failure((Status::ServiceUnavailable, ())),
@@ -80,7 +80,19 @@ impl Cipher {
         let fields_json = self.fields.as_ref().and_then(|s| serde_json::from_str(s).ok()).unwrap_or(Value::Null);
         let password_history_json = self.password_history.as_ref().and_then(|s| serde_json::from_str(s).ok()).unwrap_or(Value::Null);

-        let mut data_json: Value = serde_json::from_str(&self.data).unwrap_or(Value::Null);
+        // Get the data or a default empty value to avoid issues with the mobile apps
+        let mut data_json: Value = serde_json::from_str(&self.data).unwrap_or_else(|_| json!({
+            "Fields":null,
+            "Name": self.name,
+            "Notes":null,
+            "Password":null,
+            "PasswordHistory":null,
+            "PasswordRevisionDate":null,
+            "Response":null,
+            "Totp":null,
+            "Uris":null,
+            "Username":null
+        }));

         // TODO: ******* Backwards compat start **********
         // To remove backwards compatibility, just remove this entire section
@@ -7,6 +7,7 @@ mod user;
 mod collection;
 mod organization;
 mod two_factor;
+mod org_policy;

 pub use self::attachment::Attachment;
 pub use self::cipher::Cipher;
@@ -17,3 +18,4 @@ pub use self::organization::Organization;
 pub use self::organization::{UserOrgStatus, UserOrgType, UserOrganization};
 pub use self::two_factor::{TwoFactor, TwoFactorType};
 pub use self::user::{Invitation, User};
+pub use self::org_policy::{OrgPolicy, OrgPolicyType};
src/db/models/org_policy.rs (new file, 142 lines)
@@ -0,0 +1,142 @@
+use diesel;
+use diesel::prelude::*;
+use serde_json::Value;
+
+use crate::api::EmptyResult;
+use crate::db::schema::org_policies;
+use crate::db::DbConn;
+use crate::error::MapResult;
+
+use super::Organization;
+
+#[derive(Debug, Identifiable, Queryable, Insertable, Associations, AsChangeset)]
+#[table_name = "org_policies"]
+#[belongs_to(Organization, foreign_key = "org_uuid")]
+#[primary_key(uuid)]
+pub struct OrgPolicy {
+    pub uuid: String,
+    pub org_uuid: String,
+    pub atype: i32,
+    pub enabled: bool,
+    pub data: String,
+}
+
+#[allow(dead_code)]
+#[derive(FromPrimitive)]
+pub enum OrgPolicyType {
+    TwoFactorAuthentication = 0,
+    MasterPassword = 1,
+    PasswordGenerator = 2,
+}
+
+/// Local methods
+impl OrgPolicy {
+    pub fn new(org_uuid: String, atype: OrgPolicyType, data: String) -> Self {
+        Self {
+            uuid: crate::util::get_uuid(),
+            org_uuid,
+            atype: atype as i32,
+            enabled: false,
+            data,
+        }
+    }
+
+    pub fn to_json(&self) -> Value {
+        let data_json: Value = serde_json::from_str(&self.data).unwrap_or(Value::Null);
+        json!({
+            "Id": self.uuid,
+            "OrganizationId": self.org_uuid,
+            "Type": self.atype,
+            "Data": data_json,
+            "Enabled": self.enabled,
+            "Object": "policy",
+        })
+    }
+}
+
+/// Database methods
+impl OrgPolicy {
+    #[cfg(feature = "postgresql")]
+    pub fn save(&self, conn: &DbConn) -> EmptyResult {
+        // We need to make sure we're not going to violate the unique constraint on org_uuid and atype.
+        // This happens automatically on other DBMS backends due to replace_into(). PostgreSQL does
+        // not support multiple constraints on ON CONFLICT clauses.
+        diesel::delete(
+            org_policies::table
+                .filter(org_policies::org_uuid.eq(&self.org_uuid))
+                .filter(org_policies::atype.eq(&self.atype)),
+        )
+        .execute(&**conn)
+        .map_res("Error deleting org_policy for insert")?;
+
+        diesel::insert_into(org_policies::table)
+            .values(self)
+            .on_conflict(org_policies::uuid)
+            .do_update()
+            .set(self)
+            .execute(&**conn)
+            .map_res("Error saving org_policy")
+    }
+
+    #[cfg(not(feature = "postgresql"))]
+    pub fn save(&self, conn: &DbConn) -> EmptyResult {
+        diesel::replace_into(org_policies::table)
+            .values(&*self)
+            .execute(&**conn)
+            .map_res("Error saving org_policy")
+    }
+
+    pub fn delete(self, conn: &DbConn) -> EmptyResult {
+        diesel::delete(org_policies::table.filter(org_policies::uuid.eq(self.uuid)))
+            .execute(&**conn)
+            .map_res("Error deleting org_policy")
+    }
+
+    pub fn find_by_uuid(uuid: &str, conn: &DbConn) -> Option<Self> {
+        org_policies::table
+            .filter(org_policies::uuid.eq(uuid))
+            .first::<Self>(&**conn)
+            .ok()
+    }
+
+    pub fn find_by_org(org_uuid: &str, conn: &DbConn) -> Vec<Self> {
+        org_policies::table
+            .filter(org_policies::org_uuid.eq(org_uuid))
+            .load::<Self>(&**conn)
+            .expect("Error loading org_policy")
+    }
+
+    pub fn find_by_user(user_uuid: &str, conn: &DbConn) -> Vec<Self> {
+        use crate::db::schema::users_organizations;
+
+        org_policies::table
+            .left_join(
+                users_organizations::table.on(
+                    users_organizations::org_uuid.eq(org_policies::org_uuid)
+                        .and(users_organizations::user_uuid.eq(user_uuid)))
+            )
+            .select(org_policies::all_columns)
+            .load::<Self>(&**conn)
+            .expect("Error loading org_policy")
+    }
+
+    pub fn find_by_org_and_type(org_uuid: &str, atype: i32, conn: &DbConn) -> Option<Self> {
+        org_policies::table
+            .filter(org_policies::org_uuid.eq(org_uuid))
+            .filter(org_policies::atype.eq(atype))
+            .first::<Self>(&**conn)
+            .ok()
+    }
+
+    pub fn delete_all_by_organization(org_uuid: &str, conn: &DbConn) -> EmptyResult {
+        diesel::delete(org_policies::table.filter(org_policies::org_uuid.eq(org_uuid)))
+            .execute(&**conn)
+            .map_res("Error deleting org_policy")
+    }
+
+    /*pub fn delete_all_by_user(user_uuid: &str, conn: &DbConn) -> EmptyResult {
+        diesel::delete(twofactor::table.filter(twofactor::user_uuid.eq(user_uuid)))
+            .execute(&**conn)
+            .map_res("Error deleting twofactors")
+    }*/
+}
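Note: a short usage sketch of the new model. The import paths follow the crate layout shown above and the organization UUID is assumed to exist; this is illustrative, not part of the patch:

use crate::api::EmptyResult;
use crate::db::models::{OrgPolicy, OrgPolicyType};
use crate::db::DbConn;

// Enable the two-factor policy for an organization and print what is stored.
fn enable_twofactor_policy(org_uuid: &str, conn: &DbConn) -> EmptyResult {
    let mut policy = OrgPolicy::new(org_uuid.to_string(), OrgPolicyType::TwoFactorAuthentication, "{}".to_string());
    policy.enabled = true;
    policy.save(conn)?;

    for p in OrgPolicy::find_by_org(org_uuid, conn) {
        println!("{}", p.to_json());
    }
    Ok(())
}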
@@ -1,7 +1,8 @@
 use serde_json::Value;
 use std::cmp::Ordering;
+use num_traits::FromPrimitive;

-use super::{CollectionUser, User};
+use super::{CollectionUser, User, OrgPolicy};

 #[derive(Debug, Identifiable, Queryable, Insertable, AsChangeset)]
 #[table_name = "organizations"]
@@ -33,6 +34,7 @@ pub enum UserOrgStatus {
 }

 #[derive(Copy, Clone, PartialEq, Eq)]
+#[derive(FromPrimitive)]
 pub enum UserOrgType {
     Owner = 0,
     Admin = 1,
@@ -135,16 +137,6 @@ impl UserOrgType {
             _ => None,
         }
     }
-
-    pub fn from_i32(i: i32) -> Option<Self> {
-        match i {
-            0 => Some(UserOrgType::Owner),
-            1 => Some(UserOrgType::Admin),
-            2 => Some(UserOrgType::User),
-            3 => Some(UserOrgType::Manager),
-            _ => None,
-        }
-    }
 }

 /// Local methods
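Note: the hand-written `from_i32` removed above is superseded by `#[derive(FromPrimitive)]` from the `num_derive` crate together with the `num_traits::FromPrimitive` trait (imported at the top of the file). A standalone sketch of the pattern, reusing the same discriminants:

use num_derive::FromPrimitive;
use num_traits::FromPrimitive;

#[derive(Debug, PartialEq, FromPrimitive)]
enum UserOrgType {
    Owner = 0,
    Admin = 1,
    User = 2,
    Manager = 3,
}

fn main() {
    // The derived impl provides from_i32/from_u64 etc. through the trait.
    assert_eq!(UserOrgType::from_i32(1), Some(UserOrgType::Admin));
    assert_eq!(UserOrgType::from_i32(9), None);
}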
@@ -170,6 +162,7 @@ impl Organization {
             "UseEvents": false,
             "UseGroups": false,
             "UseTotp": true,
+            "UsePolicies": true,

             "BusinessName": null,
             "BusinessAddress1": null,
@@ -250,6 +243,7 @@ impl Organization {
         Cipher::delete_all_by_organization(&self.uuid, &conn)?;
         Collection::delete_all_by_organization(&self.uuid, &conn)?;
         UserOrganization::delete_all_by_organization(&self.uuid, &conn)?;
+        OrgPolicy::delete_all_by_organization(&self.uuid, &conn)?;

         diesel::delete(organizations::table.filter(organizations::uuid.eq(self.uuid)))
             .execute(&**conn)
@@ -267,7 +261,7 @@ impl Organization {
 impl UserOrganization {
     pub fn to_json(&self, conn: &DbConn) -> Value {
         let org = Organization::find_by_uuid(&self.org_uuid, conn).unwrap();

         json!({
             "Id": self.org_uuid,
             "Name": org.name,
@@ -280,6 +274,7 @@ impl UserOrganization {
             "UseEvents": false,
             "UseGroups": false,
             "UseTotp": true,
+            "UsePolicies": true,

             "MaxStorageGb": 10, // The value doesn't matter, we don't check server-side

@@ -77,6 +77,16 @@ table! {
     }
 }

+table! {
+    org_policies (uuid) {
+        uuid -> Varchar,
+        org_uuid -> Varchar,
+        atype -> Integer,
+        enabled -> Bool,
+        data -> Text,
+    }
+}
+
 table! {
     organizations (uuid) {
         uuid -> Varchar,
@@ -155,6 +165,7 @@ joinable!(devices -> users (user_uuid));
 joinable!(folders -> users (user_uuid));
 joinable!(folders_ciphers -> ciphers (cipher_uuid));
 joinable!(folders_ciphers -> folders (folder_uuid));
+joinable!(org_policies -> organizations (org_uuid));
 joinable!(twofactor -> users (user_uuid));
 joinable!(users_collections -> collections (collection_uuid));
 joinable!(users_collections -> users (user_uuid));
@@ -170,6 +181,7 @@ allow_tables_to_appear_in_same_query!(
     folders,
     folders_ciphers,
     invitations,
+    org_policies,
     organizations,
     twofactor,
     users,
@@ -77,6 +77,16 @@ table! {
     }
 }

+table! {
+    org_policies (uuid) {
+        uuid -> Text,
+        org_uuid -> Text,
+        atype -> Integer,
+        enabled -> Bool,
+        data -> Text,
+    }
+}
+
 table! {
     organizations (uuid) {
         uuid -> Text,
@@ -155,6 +165,7 @@ joinable!(devices -> users (user_uuid));
 joinable!(folders -> users (user_uuid));
 joinable!(folders_ciphers -> ciphers (cipher_uuid));
 joinable!(folders_ciphers -> folders (folder_uuid));
+joinable!(org_policies -> organizations (org_uuid));
 joinable!(twofactor -> users (user_uuid));
 joinable!(users_collections -> collections (collection_uuid));
 joinable!(users_collections -> users (user_uuid));
@@ -170,6 +181,7 @@ allow_tables_to_appear_in_same_query!(
     folders,
     folders_ciphers,
     invitations,
+    org_policies,
     organizations,
     twofactor,
     users,
@@ -77,6 +77,16 @@ table! {
     }
 }

+table! {
+    org_policies (uuid) {
+        uuid -> Text,
+        org_uuid -> Text,
+        atype -> Integer,
+        enabled -> Bool,
+        data -> Text,
+    }
+}
+
 table! {
     organizations (uuid) {
         uuid -> Text,
@@ -155,6 +165,7 @@ joinable!(devices -> users (user_uuid));
 joinable!(folders -> users (user_uuid));
 joinable!(folders_ciphers -> ciphers (cipher_uuid));
 joinable!(folders_ciphers -> folders (folder_uuid));
+joinable!(org_policies -> organizations (org_uuid));
 joinable!(twofactor -> users (user_uuid));
 joinable!(users_collections -> collections (collection_uuid));
 joinable!(users_collections -> users (user_uuid));
@@ -170,6 +181,7 @@ allow_tables_to_appear_in_same_query!(
     folders,
     folders_ciphers,
     invitations,
+    org_policies,
     organizations,
     twofactor,
     users,
@@ -44,10 +44,11 @@ fn mailer() -> SmtpTransport {
         _ => smtp_client,
     };

-    let smtp_client = match &CONFIG.smtp_auth_mechanism() {
-        Some(auth_mechanism_json) => {
-            let auth_mechanism = serde_json::from_str::<SmtpAuthMechanism>(&auth_mechanism_json);
-            match auth_mechanism {
+    let smtp_client = match CONFIG.smtp_auth_mechanism() {
+        Some(mechanism) => {
+            let correct_mechanism = format!("\"{}\"", crate::util::upcase_first(&mechanism.trim_matches('"')));
+
+            match serde_json::from_str::<SmtpAuthMechanism>(&correct_mechanism) {
                 Ok(auth_mechanism) => smtp_client.authentication_mechanism(auth_mechanism),
                 _ => panic!("Failure to parse mechanism. Is it proper Json? Eg. `\"Plain\"` not `Plain`"),
             }
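Note: the extra `format!`/`upcase_first` step exists so that values like `plain` or `"login"` still parse as the serde-encoded `SmtpAuthMechanism` enum, which expects a quoted, capitalized JSON string. Roughly (the helper below is a stand-in for `crate::util::upcase_first`, not its actual implementation):

// "plain", "Plain" and "\"plain\"" all normalize to "\"Plain\"".
fn normalize_mechanism(mechanism: &str) -> String {
    format!("\"{}\"", upcase_first(mechanism.trim_matches('"')))
}

fn upcase_first(s: &str) -> String {
    let mut chars = s.chars();
    match chars.next() {
        Some(first) => first.to_uppercase().collect::<String>() + chars.as_str(),
        None => String::new(),
    }
}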
src/main.rs (66 lines changed)
@@ -1,7 +1,6 @@
 #![feature(proc_macro_hygiene, vec_remove_item, try_trait, ip)]
 #![recursion_limit = "256"]

-#[cfg(feature = "openssl")]
 extern crate openssl;
 #[macro_use]
 extern crate rocket;
@@ -20,11 +19,14 @@ extern crate derive_more;
 #[macro_use]
 extern crate num_derive;

+extern crate backtrace;
+
 use std::{
     fs::create_dir_all,
     path::Path,
     process::{exit, Command},
     str::FromStr,
+    panic, thread, fmt // For panic logging
 };

 #[macro_use]
@@ -42,6 +44,16 @@ pub use error::{Error, MapResult};

 use structopt::StructOpt;

+// Used for catching panics and log them to file instead of stderr
+use backtrace::Backtrace;
+struct Shim(Backtrace);
+
+impl fmt::Debug for Shim {
+    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+        write!(fmt, "\n{:?}", self.0)
+    }
+}
+
 #[derive(Debug, StructOpt)]
 #[structopt(name = "bitwarden_rs", about = "A Bitwarden API server written in Rust")]
 struct Opt {
@@ -76,7 +88,7 @@ fn main() {
 fn parse_args() {
     let opt = Opt::from_args();
     if opt.version {
-        if let Some(version) = option_env!("GIT_VERSION") {
+        if let Some(version) = option_env!("BWRS_VERSION") {
             println!("bitwarden_rs {}", version);
         } else {
             println!("bitwarden_rs (Version info from Git not present)");
@@ -89,7 +101,7 @@ fn launch_info() {
     println!("/--------------------------------------------------------------------\\");
     println!("|                       Starting Bitwarden_RS                         |");

-    if let Some(version) = option_env!("GIT_VERSION") {
+    if let Some(version) = option_env!("BWRS_VERSION") {
         println!("|{:^68}|", format!("Version {}", version));
     }
@@ -142,6 +154,44 @@ fn init_logging(level: log::LevelFilter) -> Result<(), fern::InitError> {

     logger.apply()?;

+    // Catch panics and log them instead of default output to StdErr
+    panic::set_hook(Box::new(|info| {
+        let backtrace = Backtrace::new();
+
+        let thread = thread::current();
+        let thread = thread.name().unwrap_or("unnamed");
+
+        let msg = match info.payload().downcast_ref::<&'static str>() {
+            Some(s) => *s,
+            None => match info.payload().downcast_ref::<String>() {
+                Some(s) => &**s,
+                None => "Box<Any>",
+            },
+        };
+
+        match info.location() {
+            Some(location) => {
+                error!(
+                    target: "panic", "thread '{}' panicked at '{}': {}:{}{:?}",
+                    thread,
+                    msg,
+                    location.file(),
+                    location.line(),
+                    Shim(backtrace)
+                );
+            }
+            None => {
+                error!(
+                    target: "panic",
+                    "thread '{}' panicked at '{}'{:?}",
+                    thread,
+                    msg,
+                    Shim(backtrace)
+                )
+            }
+        }
+    }));
+
     Ok(())
 }

@@ -273,6 +323,16 @@ mod migrations {
         let connection = crate::db::get_connection().expect("Can't connect to DB");

         use std::io::stdout;

+        // Disable Foreign Key Checks during migration
+        use diesel::RunQueryDsl;
+        #[cfg(feature = "postgres")]
+        diesel::sql_query("SET CONSTRAINTS ALL DEFERRED").execute(&connection).expect("Failed to disable Foreign Key Checks during migrations");
+        #[cfg(feature = "mysql")]
+        diesel::sql_query("SET FOREIGN_KEY_CHECKS = 0").execute(&connection).expect("Failed to disable Foreign Key Checks during migrations");
+        #[cfg(feature = "sqlite")]
+        diesel::sql_query("PRAGMA defer_foreign_keys = ON").execute(&connection).expect("Failed to disable Foreign Key Checks during migrations");
+
         embedded_migrations::run_with_output(&connection, &mut stdout()).expect("Can't run migrations");
     }
 }
@@ -775,5 +775,71 @@
       "customercontrolpanel.de"
     ],
     "Excluded": false
+  },
+  {
+    "Type": 76,
+    "Domains": [
+      "docusign.com",
+      "docusign.net"
+    ],
+    "Excluded": false
+  },
+  {
+    "Type": 77,
+    "Domains": [
+      "envato.com",
+      "themeforest.net",
+      "codecanyon.net",
+      "videohive.net",
+      "audiojungle.net",
+      "graphicriver.net",
+      "photodune.net",
+      "3docean.net"
+    ],
+    "Excluded": false
+  },
+  {
+    "Type": 78,
+    "Domains": [
+      "x10hosting.com",
+      "x10premium.com"
+    ],
+    "Excluded": false
+  },
+  {
+    "Type": 79,
+    "Domains": [
+      "dnsomatic.com",
+      "opendns.com",
+      "umbrella.com"
+    ],
+    "Excluded": false
+  },
+  {
+    "Type": 80,
+    "Domains": [
+      "cagreatamerica.com",
+      "canadaswonderland.com",
+      "carowinds.com",
+      "cedarfair.com",
+      "cedarpoint.com",
+      "dorneypark.com",
+      "kingsdominion.com",
+      "knotts.com",
+      "miadventure.com",
+      "schlitterbahn.com",
+      "valleyfair.com",
+      "visitkingsisland.com",
+      "worldsoffun.com"
+    ],
+    "Excluded": false
+  },
+  {
+    "Type": 81,
+    "Domains": [
+      "ubnt.com",
+      "ui.com"
+    ],
+    "Excluded": false
   }
 ]