Compare commits

...

33 Commits
1.14 ... 1.14.1

Author SHA1 Message Date
Daniel García
94341f9f3f Fix token error while accepting invite 2020-03-20 10:51:17 +01:00
Daniel García
ff19fb3426 Merge pull request #919 from BlackDex/issue-908
Fixed issue #908
2020-03-19 18:11:47 +01:00
BlackDex
baac8d9627 Fixed issue #908
The organization UUID is usually present in the URI path as a parameter,
but sometimes it is only available as a query value.

This fix checks both and returns the UUID when possible.
2020-03-19 17:37:10 +01:00
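The actual change lands in src/auth.rs (the diff further down adds a get_org_id helper). A minimal standalone sketch of the same lookup order, assuming only the uuid crate; the function name and signature are illustrative, not from the repo:

fn resolve_org_id(path_param: Option<&str>, query_value: Option<&str>) -> Option<String> {
    // Prefer the organization id from the URI path, but only if it parses as a UUID.
    if let Some(id) = path_param {
        if uuid::Uuid::parse_str(id).is_ok() {
            return Some(id.to_string());
        }
    }
    // Otherwise fall back to the ?organizationId=<uuid> query value.
    if let Some(id) = query_value {
        if uuid::Uuid::parse_str(id).is_ok() {
            return Some(id.to_string());
        }
    }
    None
}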
BlackDex
669b101e6a Fixing issue #908
Sometimes an org-uuid is not in the path but only in a query value.
This fixes the check to cover that case.
2020-03-19 16:50:47 +01:00
Daniel García
935f38692f Merge pull request #918 from dani-garcia/revert-901-feature/opportunistic_tls
Revert "Use opportunistic TLS in SMTP connections"
2020-03-19 13:58:00 +01:00
Daniel García
d2d9fb08cc Revert "Use opportunistic TLS in SMTP connections" 2020-03-19 13:56:53 +01:00
Daniel García
b85d548879 Merge pull request #916 from BlackDex/issue-759
Fixing issue #759 by disabling Foreign Key Checks.
2020-03-18 18:48:08 +01:00
BlackDex
35f30088b2 Fixing issue #759 by disabling Foreign Key Checks.
During migrations some queries run out of order with respect to foreign
keys. Because of this, the migrations fail when the SQL database enforces
foreign key checks by default.
Turning off this check during the migrations fixes this, and the change
only applies to the migration session.
2020-03-18 18:11:11 +01:00
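The statements themselves appear in the src/main.rs diff near the end of this compare. A minimal sketch of the session-scoped MySQL variant, assuming Diesel 1.4; the helper name is illustrative:

use diesel::{MysqlConnection, RunQueryDsl};

fn disable_fk_checks_for_migration(connection: &MysqlConnection) {
    // Session-scoped: foreign key enforcement returns to normal once the
    // migration connection is closed.
    diesel::sql_query("SET FOREIGN_KEY_CHECKS = 0")
        .execute(connection)
        .expect("Failed to disable Foreign Key Checks during migrations");
    // SQLite equivalent:     PRAGMA defer_foreign_keys = ON
    // PostgreSQL equivalent: SET CONSTRAINTS ALL DEFERRED
}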
Daniel García
dce054e632 Merge pull request #912 from ymage/openssl_as_default
Fix alpine build with openssl crate as default
2020-03-16 23:02:07 +01:00
Ymage
ba725e1c25 Make the openssl crate the default (not feature-flipped) 2020-03-16 22:39:10 +01:00
Ymage
b837348b25 Build as static 2020-03-16 22:34:59 +01:00
Daniel García
7d9c7017c9 Merge pull request #911 from BlackDex/upgrade-rocket
Upgrade rocket
2020-03-16 18:17:17 +01:00
Daniel García
d6b9b8bf0c Merge pull request #876 from BlackDex/log-panics
Make panics loggable (as warn)
2020-03-16 18:16:49 +01:00
BlackDex
bd09fe1a3d Updated code so backtraces are also logged. 2020-03-16 17:53:22 +01:00
BlackDex
bcbe6177b8 Merge branch 'master' of https://github.com/dani-garcia/bitwarden_rs into log-panics 2020-03-16 17:19:27 +01:00
BlackDex
9b1d07365e Updated ring
Some small changes to match the updated ring package.
2020-03-16 16:39:20 +01:00
BlackDex
37b212427c Updated jsonwebtoken
Updated to the latest version of jsonwebtoken.
Some small code changes to match the new version.
2020-03-16 16:38:00 +01:00
BlackDex
078234d8b3 Small change for rocket compatibility 2020-03-16 16:36:44 +01:00
BlackDex
3ce0c3d1a5 Update dependencies
Primarily updating rocket, which required updating some dependencies

Latest versions of:
 - ring
 - time
 - jsonwebtoken
 - yubico
 - rocket (git)
2020-03-16 16:32:33 +01:00
Daniel García
2ee07ea1d8 Fix empty data when cloning cipher 2020-03-15 17:26:34 +01:00
Daniel García
40c339db9b Fix postgres policies, second try 2020-03-14 23:53:12 +01:00
Daniel García
402c1cd06c Merge pull request #906 from BlackDex/upgrade-reqwest
Updated reqwest to the latest version.
2020-03-14 23:35:52 +01:00
Daniel García
819f340f39 Fix issue with postgres 2020-03-14 23:35:34 +01:00
BlackDex
1b4b40c95d Updated reqwest to the latest version.
- Use the blocking client (no async).
- Disabled gzip.
- use_sys_proxy is now the default.
2020-03-14 23:12:45 +01:00
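For reference, a minimal sketch of the reqwest 0.10 blocking style this commit moves to; the URL and timeout are placeholders, not values from the repo:

use reqwest::blocking::Client;
use std::time::Duration;

fn main() -> Result<(), reqwest::Error> {
    // System proxies are honoured by default in 0.10, so use_sys_proxy() is gone,
    // and gzip is only active when the corresponding feature is enabled.
    let client = Client::builder()
        .timeout(Duration::from_secs(10))
        .build()?;
    let status = client.get("https://example.com").send()?.status();
    println!("{}", status);
    Ok(())
}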
Daniel García
afd9f4e278 Allow the smtp mechanism to be provided without quotes and in all lowercase 2020-03-14 22:31:41 +01:00
Daniel García
47a9461f39 Merge pull request #903 from TheBinaryLoop/patch-1
Updated domains with new vault values
2020-03-14 14:41:39 +01:00
Daniel García
c6f64d8368 Merge pull request #901 from sleweke/feature/opportunistic_tls
Use opportunistic TLS in SMTP connections
2020-03-14 14:41:00 +01:00
Daniel García
edabf19ddf Update vault to 2.13.1 2020-03-14 14:40:06 +01:00
Daniel García
a30d5f4cf9 Fix cloning issues 2020-03-14 14:08:57 +01:00
Daniel García
3fa78e7bb1 Initial version of policies 2020-03-14 13:32:28 +01:00
Lukas Eßmann
a8a7e4f9a5 Updated domains with new vault values
Added domains from the official vault.bitwarden.com
2020-03-13 20:08:52 +01:00
Samuel Leweke
5d3b765a23 Use opportunistic TLS in SMTP connections
If SSL is disabled, the SMTP ClientSecurity of the lettre crate
defaults to None, that is, an insecure connection. This is changed to
Opportunistic, which uses TLS if available. If TLS is not available,
the insecure connection is used (i.e., this change is backward
compatible).
2020-03-12 11:40:52 +01:00
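This is the change that commits 935f38692f / d2d9fb08cc above revert for 1.14.1. A minimal sketch of the idea, assuming the lettre 0.9-era SmtpClient API the project used at the time; host and port are placeholders:

use lettre::smtp::client::net::ClientTlsParameters;
use lettre::smtp::{ClientSecurity, SmtpClient};
use native_tls::TlsConnector;

fn build_smtp_client(host: &str, port: u16, ssl: bool) -> Result<SmtpClient, lettre::smtp::error::Error> {
    let tls = ClientTlsParameters::new(host.to_string(), TlsConnector::new().unwrap());
    let security = if ssl {
        // Explicit SSL/TLS behaves as before.
        ClientSecurity::Required(tls)
    } else {
        // Previously ClientSecurity::None; Opportunistic upgrades to TLS via
        // STARTTLS when the server offers it and stays plain text otherwise.
        ClientSecurity::Opportunistic(tls)
    };
    SmtpClient::new((host, port), security)
}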
BlackDex
7439aeb63e Make panics loggable (as warn)
panic!()s only appear on stderr, which makes tracking down some strange
issues harder when running under Docker, since stderr is not written to
the bitwarden.log file. This change logs the panic message to stdout and,
when enabled, to the log file as well.
2020-02-25 14:10:52 +01:00
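The full hook, including a backtrace, is in the src/main.rs diff below; a minimal sketch of the same idea using the log crate:

use std::panic;

fn install_panic_logger() {
    panic::set_hook(Box::new(|info| {
        // Panic payloads are either &'static str or String.
        let msg = match info.payload().downcast_ref::<&'static str>() {
            Some(s) => *s,
            None => info
                .payload()
                .downcast_ref::<String>()
                .map(String::as_str)
                .unwrap_or("Box<Any>"),
        };
        match info.location() {
            Some(loc) => log::error!("thread panicked at '{}': {}:{}", msg, loc.file(), loc.line()),
            None => log::error!("thread panicked at '{}'", msg),
        }
    }));
}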
39 changed files with 1031 additions and 572 deletions

Cargo.lock (generated, 965 changed lines)

File diff suppressed because it is too large.


@@ -26,7 +26,7 @@ rocket = { version = "0.5.0-dev", features = ["tls"], default-features = false }
rocket_contrib = "0.5.0-dev"
# HTTP client
reqwest = "0.9.24"
reqwest = { version = "0.10.4", features = ["blocking", "json"] }
# multipart/form-data support
multipart = { version = "0.16.1", features = ["server"], default-features = false }
@@ -57,13 +57,14 @@ diesel_migrations = "1.4.0"
libsqlite3-sys = { version = "0.16.0", features = ["bundled"], optional = true }
# Crypto library
ring = "0.14.6"
ring = "0.16.11"
# UUID generation
uuid = { version = "0.8.1", features = ["v4"] }
# Date and time library for Rust
# Date and time librar for Rust
chrono = "0.4.11"
time = "0.2.9"
# TOTP library
oath = "0.10.2"
@@ -72,13 +73,13 @@ oath = "0.10.2"
data-encoding = "2.2.0"
# JWT library
jsonwebtoken = "6.0.1"
jsonwebtoken = "7.1.0"
# U2F library
u2f = "0.2.0"
# Yubico Library
yubico = { version = "0.7.1", features = ["online-tokio"], default-features = false }
yubico = { version = "0.9.0", features = ["online-tokio"], default-features = false }
# A `dotenv` implementation for Rust
dotenv = { version = "0.15.0", default-features = false }
@@ -117,10 +118,13 @@ idna = "0.2.0"
# CLI argument parsing
structopt = "0.3.11"
# Logging panics to logfile instead stderr only
backtrace = "0.3.45"
[patch.crates-io]
# Use newest ring
rocket = { git = 'https://github.com/SergioBenitez/Rocket', rev = 'b95b6765e1cc8be7c1e7eaef8a9d9ad940b0ac13' }
rocket_contrib = { git = 'https://github.com/SergioBenitez/Rocket', rev = 'b95b6765e1cc8be7c1e7eaef8a9d9ad940b0ac13' }
rocket = { git = 'https://github.com/SergioBenitez/Rocket', rev = 'dfc9e9aab01d349da32c52db393e35b7fffea63c' }
rocket_contrib = { git = 'https://github.com/SergioBenitez/Rocket', rev = 'dfc9e9aab01d349da32c52db393e35b7fffea63c' }
# Use git version for timeout fix #706
lettre = { git = 'https://github.com/lettre/lettre', rev = '245c600c82ee18b766e8729f005ff453a55dce34' }


@@ -27,7 +27,7 @@
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
{% set vault_image_hash = "sha256:ce56b3f5e538351411785ac45e9b4b913259c3508b1323d62e8fa0f30717dd1c" %}
{% set vault_image_hash = "sha256:feb3f46d15738191b9043be4cdb1be2c0078ed411e7b7be73a2f4fcbca01e13c" %}
{% raw %}
# This hash is extracted from the docker web-vault builds and it's prefered over a simple tag because it's immutable.
# It can be viewed in multiple ways:
@@ -37,7 +37,7 @@
# docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.12.0e
#
# - To do the opposite, and get the tag from the hash, you can do:
# docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:ce56b3f5e538351411785ac45e9b4b913259c3508b1323d62e8fa0f30717dd1c
# docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:feb3f46d15738191b9043be4cdb1be2c0078ed411e7b7be73a2f4fcbca01e13c
{% endraw %}
FROM bitwardenrs/web-vault@{{ vault_image_hash }} as vault
@@ -71,6 +71,7 @@ RUN rustup set profile minimal
{% if "alpine" in target_file %}
ENV USER "root"
ENV RUSTFLAGS='-C link-arg=-s'
{% elif "aarch64" in target_file or "armv" in target_file %}
# Install required build libs for {{ package_arch_name }} architecture.


@@ -14,8 +14,8 @@
# docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.12.0e
#
# - To do the opposite, and get the tag from the hash, you can do:
# docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:ce56b3f5e538351411785ac45e9b4b913259c3508b1323d62e8fa0f30717dd1c
FROM bitwardenrs/web-vault@sha256:ce56b3f5e538351411785ac45e9b4b913259c3508b1323d62e8fa0f30717dd1c as vault
# docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:feb3f46d15738191b9043be4cdb1be2c0078ed411e7b7be73a2f4fcbca01e13c
FROM bitwardenrs/web-vault@sha256:feb3f46d15738191b9043be4cdb1be2c0078ed411e7b7be73a2f4fcbca01e13c as vault
########################## BUILD IMAGE ##########################
# We need to use the Rust build image, because


@@ -14,8 +14,8 @@
# docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.12.0e
#
# - To do the opposite, and get the tag from the hash, you can do:
# docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:ce56b3f5e538351411785ac45e9b4b913259c3508b1323d62e8fa0f30717dd1c
FROM bitwardenrs/web-vault@sha256:ce56b3f5e538351411785ac45e9b4b913259c3508b1323d62e8fa0f30717dd1c as vault
# docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:feb3f46d15738191b9043be4cdb1be2c0078ed411e7b7be73a2f4fcbca01e13c
FROM bitwardenrs/web-vault@sha256:feb3f46d15738191b9043be4cdb1be2c0078ed411e7b7be73a2f4fcbca01e13c as vault
########################## BUILD IMAGE ##########################
# We need to use the Rust build image, because


@@ -14,8 +14,8 @@
# docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.12.0e
#
# - To do the opposite, and get the tag from the hash, you can do:
# docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:ce56b3f5e538351411785ac45e9b4b913259c3508b1323d62e8fa0f30717dd1c
FROM bitwardenrs/web-vault@sha256:ce56b3f5e538351411785ac45e9b4b913259c3508b1323d62e8fa0f30717dd1c as vault
# docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:feb3f46d15738191b9043be4cdb1be2c0078ed411e7b7be73a2f4fcbca01e13c
FROM bitwardenrs/web-vault@sha256:feb3f46d15738191b9043be4cdb1be2c0078ed411e7b7be73a2f4fcbca01e13c as vault
########################## BUILD IMAGE ##########################
# We need to use the Rust build image, because


@@ -14,8 +14,8 @@
# docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.12.0e
#
# - To do the opposite, and get the tag from the hash, you can do:
# docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:ce56b3f5e538351411785ac45e9b4b913259c3508b1323d62e8fa0f30717dd1c
FROM bitwardenrs/web-vault@sha256:ce56b3f5e538351411785ac45e9b4b913259c3508b1323d62e8fa0f30717dd1c as vault
# docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:feb3f46d15738191b9043be4cdb1be2c0078ed411e7b7be73a2f4fcbca01e13c
FROM bitwardenrs/web-vault@sha256:feb3f46d15738191b9043be4cdb1be2c0078ed411e7b7be73a2f4fcbca01e13c as vault
########################## BUILD IMAGE ##########################
# Musl build image for statically compiled binary


@@ -14,8 +14,8 @@
# docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.12.0e
#
# - To do the opposite, and get the tag from the hash, you can do:
# docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:ce56b3f5e538351411785ac45e9b4b913259c3508b1323d62e8fa0f30717dd1c
FROM bitwardenrs/web-vault@sha256:ce56b3f5e538351411785ac45e9b4b913259c3508b1323d62e8fa0f30717dd1c as vault
# docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:feb3f46d15738191b9043be4cdb1be2c0078ed411e7b7be73a2f4fcbca01e13c
FROM bitwardenrs/web-vault@sha256:feb3f46d15738191b9043be4cdb1be2c0078ed411e7b7be73a2f4fcbca01e13c as vault
########################## BUILD IMAGE ##########################
# We need to use the Rust build image, because


@@ -14,8 +14,8 @@
# docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.12.0e
#
# - To do the opposite, and get the tag from the hash, you can do:
# docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:ce56b3f5e538351411785ac45e9b4b913259c3508b1323d62e8fa0f30717dd1c
FROM bitwardenrs/web-vault@sha256:ce56b3f5e538351411785ac45e9b4b913259c3508b1323d62e8fa0f30717dd1c as vault
# docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:feb3f46d15738191b9043be4cdb1be2c0078ed411e7b7be73a2f4fcbca01e13c
FROM bitwardenrs/web-vault@sha256:feb3f46d15738191b9043be4cdb1be2c0078ed411e7b7be73a2f4fcbca01e13c as vault
########################## BUILD IMAGE ##########################
# Musl build image for statically compiled binary


@@ -14,8 +14,8 @@
# docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.12.0e
#
# - To do the opposite, and get the tag from the hash, you can do:
# docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:ce56b3f5e538351411785ac45e9b4b913259c3508b1323d62e8fa0f30717dd1c
FROM bitwardenrs/web-vault@sha256:ce56b3f5e538351411785ac45e9b4b913259c3508b1323d62e8fa0f30717dd1c as vault
# docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:feb3f46d15738191b9043be4cdb1be2c0078ed411e7b7be73a2f4fcbca01e13c
FROM bitwardenrs/web-vault@sha256:feb3f46d15738191b9043be4cdb1be2c0078ed411e7b7be73a2f4fcbca01e13c as vault
########################## BUILD IMAGE ##########################
# We need to use the Rust build image, because


@@ -14,8 +14,8 @@
# docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.12.0e
#
# - To do the opposite, and get the tag from the hash, you can do:
# docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:ce56b3f5e538351411785ac45e9b4b913259c3508b1323d62e8fa0f30717dd1c
FROM bitwardenrs/web-vault@sha256:ce56b3f5e538351411785ac45e9b4b913259c3508b1323d62e8fa0f30717dd1c as vault
# docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:feb3f46d15738191b9043be4cdb1be2c0078ed411e7b7be73a2f4fcbca01e13c
FROM bitwardenrs/web-vault@sha256:feb3f46d15738191b9043be4cdb1be2c0078ed411e7b7be73a2f4fcbca01e13c as vault
########################## BUILD IMAGE ##########################
# Musl build image for statically compiled binary


@@ -14,8 +14,8 @@
# docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.12.0e
#
# - To do the opposite, and get the tag from the hash, you can do:
# docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:ce56b3f5e538351411785ac45e9b4b913259c3508b1323d62e8fa0f30717dd1c
FROM bitwardenrs/web-vault@sha256:ce56b3f5e538351411785ac45e9b4b913259c3508b1323d62e8fa0f30717dd1c as vault
# docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:feb3f46d15738191b9043be4cdb1be2c0078ed411e7b7be73a2f4fcbca01e13c
FROM bitwardenrs/web-vault@sha256:feb3f46d15738191b9043be4cdb1be2c0078ed411e7b7be73a2f4fcbca01e13c as vault
########################## BUILD IMAGE ##########################
# We need to use the Rust build image, because


@@ -14,8 +14,8 @@
# docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.12.0e
#
# - To do the opposite, and get the tag from the hash, you can do:
# docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:ce56b3f5e538351411785ac45e9b4b913259c3508b1323d62e8fa0f30717dd1c
FROM bitwardenrs/web-vault@sha256:ce56b3f5e538351411785ac45e9b4b913259c3508b1323d62e8fa0f30717dd1c as vault
# docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:feb3f46d15738191b9043be4cdb1be2c0078ed411e7b7be73a2f4fcbca01e13c
FROM bitwardenrs/web-vault@sha256:feb3f46d15738191b9043be4cdb1be2c0078ed411e7b7be73a2f4fcbca01e13c as vault
########################## BUILD IMAGE ##########################
# We need to use the Rust build image, because


@@ -14,8 +14,8 @@
# docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.12.0e
#
# - To do the opposite, and get the tag from the hash, you can do:
# docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:ce56b3f5e538351411785ac45e9b4b913259c3508b1323d62e8fa0f30717dd1c
FROM bitwardenrs/web-vault@sha256:ce56b3f5e538351411785ac45e9b4b913259c3508b1323d62e8fa0f30717dd1c as vault
# docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:feb3f46d15738191b9043be4cdb1be2c0078ed411e7b7be73a2f4fcbca01e13c
FROM bitwardenrs/web-vault@sha256:feb3f46d15738191b9043be4cdb1be2c0078ed411e7b7be73a2f4fcbca01e13c as vault
########################## BUILD IMAGE ##########################
# We need to use the Rust build image, because


@@ -14,8 +14,8 @@
# docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.12.0e
#
# - To do the opposite, and get the tag from the hash, you can do:
# docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:ce56b3f5e538351411785ac45e9b4b913259c3508b1323d62e8fa0f30717dd1c
FROM bitwardenrs/web-vault@sha256:ce56b3f5e538351411785ac45e9b4b913259c3508b1323d62e8fa0f30717dd1c as vault
# docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:feb3f46d15738191b9043be4cdb1be2c0078ed411e7b7be73a2f4fcbca01e13c
FROM bitwardenrs/web-vault@sha256:feb3f46d15738191b9043be4cdb1be2c0078ed411e7b7be73a2f4fcbca01e13c as vault
########################## BUILD IMAGE ##########################
# We need to use the Rust build image, because


@@ -0,0 +1 @@
DROP TABLE org_policies;


@@ -0,0 +1,9 @@
CREATE TABLE org_policies (
uuid CHAR(36) NOT NULL PRIMARY KEY,
org_uuid CHAR(36) NOT NULL REFERENCES organizations (uuid),
atype INTEGER NOT NULL,
enabled BOOLEAN NOT NULL,
data TEXT NOT NULL,
UNIQUE (org_uuid, atype)
);


@@ -0,0 +1 @@
DROP TABLE org_policies;


@@ -0,0 +1,9 @@
CREATE TABLE org_policies (
uuid CHAR(36) NOT NULL PRIMARY KEY,
org_uuid CHAR(36) NOT NULL REFERENCES organizations (uuid),
atype INTEGER NOT NULL,
enabled BOOLEAN NOT NULL,
data TEXT NOT NULL,
UNIQUE (org_uuid, atype)
);


@@ -0,0 +1 @@
DROP TABLE org_policies;


@@ -0,0 +1,9 @@
CREATE TABLE org_policies (
uuid TEXT NOT NULL PRIMARY KEY,
org_uuid TEXT NOT NULL REFERENCES organizations (uuid),
atype INTEGER NOT NULL,
enabled BOOLEAN NOT NULL,
data TEXT NOT NULL,
UNIQUE (org_uuid, atype)
);


@@ -91,7 +91,7 @@ fn post_admin_login(data: Form<LoginForm>, mut cookies: Cookies, ip: ClientIp) -
let cookie = Cookie::build(COOKIE_NAME, jwt)
.path(admin_path())
.max_age(chrono::Duration::minutes(20))
.max_age(time::Duration::minutes(20))
.same_site(SameSite::Strict)
.http_only(true)
.finish();


@@ -79,6 +79,9 @@ fn sync(data: Form<SyncData>, headers: Headers, conn: DbConn) -> JsonResult {
let collections = Collection::find_by_user_uuid(&headers.user.uuid, &conn);
let collections_json: Vec<Value> = collections.iter().map(Collection::to_json).collect();
let policies = OrgPolicy::find_by_user(&headers.user.uuid, &conn);
let policies_json: Vec<Value> = policies.iter().map(OrgPolicy::to_json).collect();
let ciphers = Cipher::find_by_user(&headers.user.uuid, &conn);
let ciphers_json: Vec<Value> = ciphers
.iter()
@@ -95,6 +98,7 @@ fn sync(data: Form<SyncData>, headers: Headers, conn: DbConn) -> JsonResult {
"Profile": user_json,
"Folders": folders_json,
"Collections": collections_json,
"Policies": policies_json,
"Ciphers": ciphers_json,
"Domains": domains_json,
"Object": "sync"
@@ -599,10 +603,14 @@ fn share_cipher_by_uuid(
None => err!("Cipher doesn't exist"),
};
match data.Cipher.OrganizationId.clone() {
None => err!("Organization id not provided"),
Some(organization_uuid) => {
let mut shared_to_collection = false;
match data.Cipher.OrganizationId.clone() {
// If we don't get an organization ID, we don't do anything
// No error because this is used when using the Clone functionality
None => {},
Some(organization_uuid) => {
for uuid in &data.CollectionIds {
match Collection::find_by_uuid_and_org(uuid, &organization_uuid, &conn) {
None => err!("Invalid collection ID provided"),
@@ -616,6 +624,9 @@ fn share_cipher_by_uuid(
}
}
}
}
};
update_cipher_from_data(
&mut cipher,
data.Cipher,
@@ -628,8 +639,6 @@ fn share_cipher_by_uuid(
Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, &conn)))
}
}
}
#[post("/ciphers/<uuid>/attachment", format = "multipart/form-data", data = "<data>")]
fn post_attachment(


@@ -146,10 +146,10 @@ fn hibp_breach(username: String) -> JsonResult {
username
);
use reqwest::{header::USER_AGENT, Client};
use reqwest::{header::USER_AGENT, blocking::Client};
if let Some(api_key) = crate::CONFIG.hibp_api_key() {
let hibp_client = Client::builder().use_sys_proxy().build()?;
let hibp_client = Client::builder().build()?;
let res = hibp_client
.get(&url)


@@ -2,6 +2,7 @@ use rocket::request::Form;
use rocket::Route;
use rocket_contrib::json::Json;
use serde_json::Value;
use num_traits::FromPrimitive;
use crate::api::{
EmptyResult, JsonResult, JsonUpcase, JsonUpcaseVec, Notify, NumberOrString, PasswordData, UpdateType,
@@ -45,6 +46,10 @@ pub fn routes() -> Vec<Route> {
delete_user,
post_delete_user,
post_org_import,
list_policies,
list_policies_token,
get_policy,
put_policy,
]
}
@@ -830,22 +835,13 @@ struct RelationsData {
fn post_org_import(
query: Form<OrgIdData>,
data: JsonUpcase<ImportData>,
headers: Headers,
headers: AdminHeaders,
conn: DbConn,
nt: Notify,
) -> EmptyResult {
let data: ImportData = data.into_inner().data;
let org_id = query.into_inner().organization_id;
let org_user = match UserOrganization::find_by_user_and_org(&headers.user.uuid, &org_id, &conn) {
Some(user) => user,
None => err!("User is not part of the organization"),
};
if org_user.atype < UserOrgType::Admin {
err!("Only admins or owners can import into an organization")
}
// Read and create the collections
let collections: Vec<_> = data
.Collections
@@ -866,6 +862,8 @@ fn post_org_import(
relations.push((relation.Key, relation.Value));
}
let headers: Headers = headers.into();
// Read and create the ciphers
let ciphers: Vec<_> = data
.Ciphers
@@ -901,3 +899,83 @@ fn post_org_import(
let mut user = headers.user;
user.update_revision(&conn)
}
#[get("/organizations/<org_id>/policies")]
fn list_policies(org_id: String, _headers: AdminHeaders, conn: DbConn) -> JsonResult {
let policies = OrgPolicy::find_by_org(&org_id, &conn);
let policies_json: Vec<Value> = policies.iter().map(OrgPolicy::to_json).collect();
Ok(Json(json!({
"Data": policies_json,
"Object": "list",
"ContinuationToken": null
})))
}
#[get("/organizations/<org_id>/policies/token?<token>")]
fn list_policies_token(org_id: String, token: String, conn: DbConn) -> JsonResult {
let invite = crate::auth::decode_invite(&token)?;
let invite_org_id = match invite.org_id {
Some(invite_org_id) => invite_org_id,
None => err!("Invalid token"),
};
if invite_org_id != org_id {
err!("Token doesn't match request organization");
}
// TODO: We receive the invite token as ?token=<>, validate it contains the org id
let policies = OrgPolicy::find_by_org(&org_id, &conn);
let policies_json: Vec<Value> = policies.iter().map(OrgPolicy::to_json).collect();
Ok(Json(json!({
"Data": policies_json,
"Object": "list",
"ContinuationToken": null
})))
}
#[get("/organizations/<org_id>/policies/<pol_type>")]
fn get_policy(org_id: String, pol_type: i32, _headers: AdminHeaders, conn: DbConn) -> JsonResult {
let pol_type_enum = match OrgPolicyType::from_i32(pol_type) {
Some(pt) => pt,
None => err!("Invalid policy type"),
};
let policy = match OrgPolicy::find_by_org_and_type(&org_id, pol_type, &conn) {
Some(p) => p,
None => OrgPolicy::new(org_id, pol_type_enum, "{}".to_string()),
};
Ok(Json(policy.to_json()))
}
#[derive(Deserialize)]
struct PolicyData {
enabled: bool,
#[serde(rename = "type")]
_type: i32,
data: Value,
}
#[put("/organizations/<org_id>/policies/<pol_type>", data = "<data>")]
fn put_policy(org_id: String, pol_type: i32, data: Json<PolicyData>, _headers: AdminHeaders, conn: DbConn) -> JsonResult {
let data: PolicyData = data.into_inner();
let pol_type_enum = match OrgPolicyType::from_i32(pol_type) {
Some(pt) => pt,
None => err!("Invalid policy type"),
};
let mut policy = match OrgPolicy::find_by_org_and_type(&org_id, pol_type, &conn) {
Some(p) => p,
None => OrgPolicy::new(org_id, pol_type_enum, "{}".to_string()),
};
policy.enabled = data.enabled;
policy.data = serde_json::to_string(&data.data)?;
policy.save(&conn)?;
Ok(Json(policy.to_json()))
}


@@ -187,7 +187,7 @@ fn activate_duo_put(data: JsonUpcase<EnableDuoData>, headers: Headers, conn: DbC
fn duo_api_request(method: &str, path: &str, params: &str, data: &DuoData) -> EmptyResult {
const AGENT: &str = "bitwarden_rs:Duo/1.0 (Rust)";
use reqwest::{header::*, Client, Method};
use reqwest::{header::*, Method, blocking::Client};
use std::str::FromStr;
let url = format!("https://{}{}", &data.host, path);


@@ -8,7 +8,7 @@ use rocket::http::ContentType;
use rocket::response::Content;
use rocket::Route;
use reqwest::{header::HeaderMap, Client, Response, Url};
use reqwest::{Url, header::HeaderMap, blocking::Client, blocking::Response};
use rocket::http::Cookie;
@@ -30,8 +30,6 @@ const ALLOWED_CHARS: &str = "_-.";
static CLIENT: Lazy<Client> = Lazy::new(|| {
// Reuse the client between requests
Client::builder()
.use_sys_proxy()
.gzip(true)
.timeout(Duration::from_secs(CONFIG.icon_download_timeout()))
.default_headers(_header_map())
.build()


@@ -4,8 +4,9 @@
use crate::util::read_file;
use chrono::{Duration, Utc};
use once_cell::sync::Lazy;
use num_traits::FromPrimitive;
use jsonwebtoken::{self, Algorithm, Header};
use jsonwebtoken::{self, Algorithm, Header, EncodingKey, DecodingKey};
use serde::de::DeserializeOwned;
use serde::ser::Serialize;
@@ -31,7 +32,7 @@ static PUBLIC_RSA_KEY: Lazy<Vec<u8>> = Lazy::new(|| match read_file(&CONFIG.publ
});
pub fn encode_jwt<T: Serialize>(claims: &T) -> String {
match jsonwebtoken::encode(&JWT_HEADER, claims, &PRIVATE_RSA_KEY) {
match jsonwebtoken::encode(&JWT_HEADER, claims, &EncodingKey::from_rsa_der(&PRIVATE_RSA_KEY)) {
Ok(token) => token,
Err(e) => panic!("Error encoding jwt {}", e),
}
@@ -50,7 +51,7 @@ fn decode_jwt<T: DeserializeOwned>(token: &str, issuer: String) -> Result<T, Err
let token = token.replace(char::is_whitespace, "");
jsonwebtoken::decode(&token, &PUBLIC_RSA_KEY, &validation)
jsonwebtoken::decode(&token, &DecodingKey::from_rsa_der(&PUBLIC_RSA_KEY), &validation)
.map(|d| d.claims)
.map_res("Error decoding JWT")
}
@@ -306,6 +307,25 @@ pub struct OrgHeaders {
pub org_user_type: UserOrgType,
}
// org_id is usually the second param ("/organizations/<org_id>")
// But there are cases where it is located in a query value.
// First check the param, if this is not a valid uuid, we will try the query value.
fn get_org_id(request: &Request) -> Option<String> {
if let Some(Ok(org_id)) = request.get_param::<String>(1) {
if uuid::Uuid::parse_str(&org_id).is_ok() {
return Some(org_id);
}
}
if let Some(Ok(org_id)) = request.get_query_value::<String>("organizationId") {
if uuid::Uuid::parse_str(&org_id).is_ok() {
return Some(org_id);
}
}
None
}
impl<'a, 'r> FromRequest<'a, 'r> for OrgHeaders {
type Error = &'static str;
@@ -314,9 +334,8 @@ impl<'a, 'r> FromRequest<'a, 'r> for OrgHeaders {
Outcome::Forward(_) => Outcome::Forward(()),
Outcome::Failure(f) => Outcome::Failure(f),
Outcome::Success(headers) => {
// org_id is expected to be the second param ("/organizations/<org_id>")
match request.get_param::<String>(1) {
Some(Ok(org_id)) => {
match get_org_id(request) {
Some(org_id) => {
let conn = match request.guard::<DbConn>() {
Outcome::Success(conn) => conn,
_ => err_handler!("Error getting DB"),
@@ -347,7 +366,7 @@ impl<'a, 'r> FromRequest<'a, 'r> for OrgHeaders {
}
},
})
}
},
_ => err_handler!("Error getting the organization id"),
}
}
@@ -385,6 +404,16 @@ impl<'a, 'r> FromRequest<'a, 'r> for AdminHeaders {
}
}
impl Into<Headers> for AdminHeaders {
fn into(self) -> Headers {
Headers {
host: self.host,
device: self.device,
user: self.user
}
}
}
pub struct OwnerHeaders {
pub host: String,
pub device: Device,


@@ -6,7 +6,7 @@ use crate::error::Error;
use ring::{digest, hmac, pbkdf2};
use std::num::NonZeroU32;
static DIGEST_ALG: &digest::Algorithm = &digest::SHA256;
static DIGEST_ALG: pbkdf2::Algorithm = pbkdf2::PBKDF2_HMAC_SHA256;
const OUTPUT_LEN: usize = digest::SHA256_OUTPUT_LEN;
pub fn hash_password(secret: &[u8], salt: &[u8], iterations: u32) -> Vec<u8> {
@@ -29,7 +29,7 @@ pub fn verify_password_hash(secret: &[u8], salt: &[u8], previous: &[u8], iterati
pub fn hmac_sign(key: &str, data: &str) -> String {
use data_encoding::HEXLOWER;
let key = hmac::SigningKey::new(&digest::SHA1, key.as_bytes());
let key = hmac::Key::new(hmac::HMAC_SHA1_FOR_LEGACY_USE_ONLY, key.as_bytes());
let signature = hmac::sign(&key, data.as_bytes());
HEXLOWER.encode(signature.as_ref())


@@ -76,7 +76,8 @@ impl<'a, 'r> FromRequest<'a, 'r> for DbConn {
type Error = ();
fn from_request(request: &'a Request<'r>) -> request::Outcome<DbConn, ()> {
let pool = request.guard::<State<Pool>>()?;
// https://github.com/SergioBenitez/Rocket/commit/e3c1a4ad3ab9b840482ec6de4200d30df43e357c
let pool = try_outcome!(request.guard::<State<Pool>>());
match pool.get() {
Ok(conn) => Outcome::Success(DbConn(conn)),
Err(_) => Outcome::Failure((Status::ServiceUnavailable, ())),


@@ -7,6 +7,7 @@ mod user;
mod collection;
mod organization;
mod two_factor;
mod org_policy;
pub use self::attachment::Attachment;
pub use self::cipher::Cipher;
@@ -17,3 +18,4 @@ pub use self::organization::Organization;
pub use self::organization::{UserOrgStatus, UserOrgType, UserOrganization};
pub use self::two_factor::{TwoFactor, TwoFactorType};
pub use self::user::{Invitation, User};
pub use self::org_policy::{OrgPolicy, OrgPolicyType};

src/db/models/org_policy.rs (new file, 142 lines)

@@ -0,0 +1,142 @@
use diesel;
use diesel::prelude::*;
use serde_json::Value;
use crate::api::EmptyResult;
use crate::db::schema::org_policies;
use crate::db::DbConn;
use crate::error::MapResult;
use super::Organization;
#[derive(Debug, Identifiable, Queryable, Insertable, Associations, AsChangeset)]
#[table_name = "org_policies"]
#[belongs_to(Organization, foreign_key = "org_uuid")]
#[primary_key(uuid)]
pub struct OrgPolicy {
pub uuid: String,
pub org_uuid: String,
pub atype: i32,
pub enabled: bool,
pub data: String,
}
#[allow(dead_code)]
#[derive(FromPrimitive)]
pub enum OrgPolicyType {
TwoFactorAuthentication = 0,
MasterPassword = 1,
PasswordGenerator = 2,
}
/// Local methods
impl OrgPolicy {
pub fn new(org_uuid: String, atype: OrgPolicyType, data: String) -> Self {
Self {
uuid: crate::util::get_uuid(),
org_uuid,
atype: atype as i32,
enabled: false,
data,
}
}
pub fn to_json(&self) -> Value {
let data_json: Value = serde_json::from_str(&self.data).unwrap_or(Value::Null);
json!({
"Id": self.uuid,
"OrganizationId": self.org_uuid,
"Type": self.atype,
"Data": data_json,
"Enabled": self.enabled,
"Object": "policy",
})
}
}
/// Database methods
impl OrgPolicy {
#[cfg(feature = "postgresql")]
pub fn save(&self, conn: &DbConn) -> EmptyResult {
// We need to make sure we're not going to violate the unique constraint on org_uuid and atype.
// This happens automatically on other DBMS backends due to replace_into(). PostgreSQL does
// not support multiple constraints on ON CONFLICT clauses.
diesel::delete(
org_policies::table
.filter(org_policies::org_uuid.eq(&self.org_uuid))
.filter(org_policies::atype.eq(&self.atype)),
)
.execute(&**conn)
.map_res("Error deleting org_policy for insert")?;
diesel::insert_into(org_policies::table)
.values(self)
.on_conflict(org_policies::uuid)
.do_update()
.set(self)
.execute(&**conn)
.map_res("Error saving org_policy")
}
#[cfg(not(feature = "postgresql"))]
pub fn save(&self, conn: &DbConn) -> EmptyResult {
diesel::replace_into(org_policies::table)
.values(&*self)
.execute(&**conn)
.map_res("Error saving org_policy")
}
pub fn delete(self, conn: &DbConn) -> EmptyResult {
diesel::delete(org_policies::table.filter(org_policies::uuid.eq(self.uuid)))
.execute(&**conn)
.map_res("Error deleting org_policy")
}
pub fn find_by_uuid(uuid: &str, conn: &DbConn) -> Option<Self> {
org_policies::table
.filter(org_policies::uuid.eq(uuid))
.first::<Self>(&**conn)
.ok()
}
pub fn find_by_org(org_uuid: &str, conn: &DbConn) -> Vec<Self> {
org_policies::table
.filter(org_policies::org_uuid.eq(org_uuid))
.load::<Self>(&**conn)
.expect("Error loading org_policy")
}
pub fn find_by_user(user_uuid: &str, conn: &DbConn) -> Vec<Self> {
use crate::db::schema::users_organizations;
org_policies::table
.left_join(
users_organizations::table.on(
users_organizations::org_uuid.eq(org_policies::org_uuid)
.and(users_organizations::user_uuid.eq(user_uuid)))
)
.select(org_policies::all_columns)
.load::<Self>(&**conn)
.expect("Error loading org_policy")
}
pub fn find_by_org_and_type(org_uuid: &str, atype: i32, conn: &DbConn) -> Option<Self> {
org_policies::table
.filter(org_policies::org_uuid.eq(org_uuid))
.filter(org_policies::atype.eq(atype))
.first::<Self>(&**conn)
.ok()
}
pub fn delete_all_by_organization(org_uuid: &str, conn: &DbConn) -> EmptyResult {
diesel::delete(org_policies::table.filter(org_policies::org_uuid.eq(org_uuid)))
.execute(&**conn)
.map_res("Error deleting org_policy")
}
/*pub fn delete_all_by_user(user_uuid: &str, conn: &DbConn) -> EmptyResult {
diesel::delete(twofactor::table.filter(twofactor::user_uuid.eq(user_uuid)))
.execute(&**conn)
.map_res("Error deleting twofactors")
}*/
}


@@ -1,7 +1,8 @@
use serde_json::Value;
use std::cmp::Ordering;
use num_traits::FromPrimitive;
use super::{CollectionUser, User};
use super::{CollectionUser, User, OrgPolicy};
#[derive(Debug, Identifiable, Queryable, Insertable, AsChangeset)]
#[table_name = "organizations"]
@@ -33,6 +34,7 @@ pub enum UserOrgStatus {
}
#[derive(Copy, Clone, PartialEq, Eq)]
#[derive(FromPrimitive)]
pub enum UserOrgType {
Owner = 0,
Admin = 1,
@@ -135,16 +137,6 @@ impl UserOrgType {
_ => None,
}
}
pub fn from_i32(i: i32) -> Option<Self> {
match i {
0 => Some(UserOrgType::Owner),
1 => Some(UserOrgType::Admin),
2 => Some(UserOrgType::User),
3 => Some(UserOrgType::Manager),
_ => None,
}
}
}
/// Local methods
@@ -170,6 +162,7 @@ impl Organization {
"UseEvents": false,
"UseGroups": false,
"UseTotp": true,
"UsePolicies": true,
"BusinessName": null,
"BusinessAddress1": null,
@@ -250,6 +243,7 @@ impl Organization {
Cipher::delete_all_by_organization(&self.uuid, &conn)?;
Collection::delete_all_by_organization(&self.uuid, &conn)?;
UserOrganization::delete_all_by_organization(&self.uuid, &conn)?;
OrgPolicy::delete_all_by_organization(&self.uuid, &conn)?;
diesel::delete(organizations::table.filter(organizations::uuid.eq(self.uuid)))
.execute(&**conn)
@@ -280,6 +274,7 @@ impl UserOrganization {
"UseEvents": false,
"UseGroups": false,
"UseTotp": true,
"UsePolicies": true,
"MaxStorageGb": 10, // The value doesn't matter, we don't check server-side


@@ -77,6 +77,16 @@ table! {
}
}
table! {
org_policies (uuid) {
uuid -> Varchar,
org_uuid -> Varchar,
atype -> Integer,
enabled -> Bool,
data -> Text,
}
}
table! {
organizations (uuid) {
uuid -> Varchar,
@@ -155,6 +165,7 @@ joinable!(devices -> users (user_uuid));
joinable!(folders -> users (user_uuid));
joinable!(folders_ciphers -> ciphers (cipher_uuid));
joinable!(folders_ciphers -> folders (folder_uuid));
joinable!(org_policies -> organizations (org_uuid));
joinable!(twofactor -> users (user_uuid));
joinable!(users_collections -> collections (collection_uuid));
joinable!(users_collections -> users (user_uuid));
@@ -170,6 +181,7 @@ allow_tables_to_appear_in_same_query!(
folders,
folders_ciphers,
invitations,
org_policies,
organizations,
twofactor,
users,


@@ -77,6 +77,16 @@ table! {
}
}
table! {
org_policies (uuid) {
uuid -> Text,
org_uuid -> Text,
atype -> Integer,
enabled -> Bool,
data -> Text,
}
}
table! {
organizations (uuid) {
uuid -> Text,
@@ -155,6 +165,7 @@ joinable!(devices -> users (user_uuid));
joinable!(folders -> users (user_uuid));
joinable!(folders_ciphers -> ciphers (cipher_uuid));
joinable!(folders_ciphers -> folders (folder_uuid));
joinable!(org_policies -> organizations (org_uuid));
joinable!(twofactor -> users (user_uuid));
joinable!(users_collections -> collections (collection_uuid));
joinable!(users_collections -> users (user_uuid));
@@ -170,6 +181,7 @@ allow_tables_to_appear_in_same_query!(
folders,
folders_ciphers,
invitations,
org_policies,
organizations,
twofactor,
users,


@@ -77,6 +77,16 @@ table! {
}
}
table! {
org_policies (uuid) {
uuid -> Text,
org_uuid -> Text,
atype -> Integer,
enabled -> Bool,
data -> Text,
}
}
table! {
organizations (uuid) {
uuid -> Text,
@@ -155,6 +165,7 @@ joinable!(devices -> users (user_uuid));
joinable!(folders -> users (user_uuid));
joinable!(folders_ciphers -> ciphers (cipher_uuid));
joinable!(folders_ciphers -> folders (folder_uuid));
joinable!(org_policies -> organizations (org_uuid));
joinable!(twofactor -> users (user_uuid));
joinable!(users_collections -> collections (collection_uuid));
joinable!(users_collections -> users (user_uuid));
@@ -170,6 +181,7 @@ allow_tables_to_appear_in_same_query!(
folders,
folders_ciphers,
invitations,
org_policies,
organizations,
twofactor,
users,


@@ -44,10 +44,11 @@ fn mailer() -> SmtpTransport {
_ => smtp_client,
};
let smtp_client = match &CONFIG.smtp_auth_mechanism() {
Some(auth_mechanism_json) => {
let auth_mechanism = serde_json::from_str::<SmtpAuthMechanism>(&auth_mechanism_json);
match auth_mechanism {
let smtp_client = match CONFIG.smtp_auth_mechanism() {
Some(mechanism) => {
let correct_mechanism = format!("\"{}\"", crate::util::upcase_first(&mechanism.trim_matches('"')));
match serde_json::from_str::<SmtpAuthMechanism>(&correct_mechanism) {
Ok(auth_mechanism) => smtp_client.authentication_mechanism(auth_mechanism),
_ => panic!("Failure to parse mechanism. Is it proper Json? Eg. `\"Plain\"` not `Plain`"),
}


@@ -1,7 +1,6 @@
#![feature(proc_macro_hygiene, vec_remove_item, try_trait, ip)]
#![recursion_limit = "256"]
#[cfg(feature = "openssl")]
extern crate openssl;
#[macro_use]
extern crate rocket;
@@ -20,11 +19,14 @@ extern crate derive_more;
#[macro_use]
extern crate num_derive;
extern crate backtrace;
use std::{
fs::create_dir_all,
path::Path,
process::{exit, Command},
str::FromStr,
panic, thread, fmt // For panic logging
};
#[macro_use]
@@ -42,6 +44,16 @@ pub use error::{Error, MapResult};
use structopt::StructOpt;
// Used for catching panics and log them to file instead of stderr
use backtrace::Backtrace;
struct Shim(Backtrace);
impl fmt::Debug for Shim {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
write!(fmt, "\n{:?}", self.0)
}
}
#[derive(Debug, StructOpt)]
#[structopt(name = "bitwarden_rs", about = "A Bitwarden API server written in Rust")]
struct Opt {
@@ -142,6 +154,44 @@ fn init_logging(level: log::LevelFilter) -> Result<(), fern::InitError> {
logger.apply()?;
// Catch panics and log them instead of default output to StdErr
panic::set_hook(Box::new(|info| {
let backtrace = Backtrace::new();
let thread = thread::current();
let thread = thread.name().unwrap_or("unnamed");
let msg = match info.payload().downcast_ref::<&'static str>() {
Some(s) => *s,
None => match info.payload().downcast_ref::<String>() {
Some(s) => &**s,
None => "Box<Any>",
},
};
match info.location() {
Some(location) => {
error!(
target: "panic", "thread '{}' panicked at '{}': {}:{}{:?}",
thread,
msg,
location.file(),
location.line(),
Shim(backtrace)
);
}
None => {
error!(
target: "panic",
"thread '{}' panicked at '{}'{:?}",
thread,
msg,
Shim(backtrace)
)
}
}
}));
Ok(())
}
@@ -273,6 +323,16 @@ mod migrations {
let connection = crate::db::get_connection().expect("Can't connect to DB");
use std::io::stdout;
// Disable Foreign Key Checks during migration
use diesel::RunQueryDsl;
#[cfg(feature = "postgres")]
diesel::sql_query("SET CONSTRAINTS ALL DEFERRED").execute(&connection).expect("Failed to disable Foreign Key Checks during migrations");
#[cfg(feature = "mysql")]
diesel::sql_query("SET FOREIGN_KEY_CHECKS = 0").execute(&connection).expect("Failed to disable Foreign Key Checks during migrations");
#[cfg(feature = "sqlite")]
diesel::sql_query("PRAGMA defer_foreign_keys = ON").execute(&connection).expect("Failed to disable Foreign Key Checks during migrations");
embedded_migrations::run_with_output(&connection, &mut stdout()).expect("Can't run migrations");
}
}


@@ -775,5 +775,71 @@
"customercontrolpanel.de"
],
"Excluded": false
},
{
"Type": 76,
"Domains": [
"docusign.com",
"docusign.net"
],
"Excluded": false
},
{
"Type": 77,
"Domains": [
"envato.com",
"themeforest.net",
"codecanyon.net",
"videohive.net",
"audiojungle.net",
"graphicriver.net",
"photodune.net",
"3docean.net"
],
"Excluded": false
},
{
"Type": 78,
"Domains": [
"x10hosting.com",
"x10premium.com"
],
"Excluded": false
},
{
"Type": 79,
"Domains": [
"dnsomatic.com",
"opendns.com",
"umbrella.com"
],
"Excluded": false
},
{
"Type": 80,
"Domains": [
"cagreatamerica.com",
"canadaswonderland.com",
"carowinds.com",
"cedarfair.com",
"cedarpoint.com",
"dorneypark.com",
"kingsdominion.com",
"knotts.com",
"miadventure.com",
"schlitterbahn.com",
"valleyfair.com",
"visitkingsisland.com",
"worldsoffun.com"
],
"Excluded": false
},
{
"Type": 81,
"Domains": [
"ubnt.com",
"ui.com"
],
"Excluded": false
}
]