Mirror of https://github.com/dani-garcia/vaultwarden.git (synced 2025-09-09 18:25:58 +03:00)
Compare commits
253 Commits
SHA1:
2c2276c5bb, 672a245548, 2d2745195e, 026f9da035, d23d4f2c1d, 515b87755a, d8ea3d2bfe, ee7837d022,
07743e490b, 9101d6e48f, 27c23b60b8, e7b6238f43, 8be2ed6255, c9c3f07171, 8a21c6df10, df71f57d86,
60e39a9dd1, bc6a53b847, 05a1137828, cef38bf40b, 0b13a8c4aa, 3fbd7919d8, 5f688ff209, f6cfb5bf21,
df8c9f39ac, d7ee7caed4, 2e300da057, 3fb63bbe8c, 9671ed4cca, d10ef3fd4b, dd0b847912, 8c34ff5d23,
15750256e2, 6989fc7bdb, 4923614730, 76f38621de, fff72889f6, 12af32b9ea, 9add8e19eb, 5710703c50,
1322b876e9, 9ed2ba61c6, 62a461ae15, 6f7220b68e, 4859932d35, ee277de707, c11f47903a, 6a5f1613e7,
dc36f0cb6c, 6c38026ef5, 4c9cc9890c, f57b407c60, ce0651b79c, edc26cb1e1, ff759397f6, badd22ac3d,
6f78395ef7, 5fb6531db8, eb9d5e1196, 233b48bdad, e22e290f67, ab95a69dc8, 85c8a01f4a, 42af7c6dab,
08a445e2ac, c0b2877da3, cf8ca85289, a8a92f6c51, 95f833aacd, 4f45cc081f, 2a4cd24c60, ef551f4cc6,
4545f271c3, 2768396a72, 5521a86693, 3160780549, f0701657a9, 21325b7523, 874f5c34bd, eadab2e9ca,
253faaf023, 3d843a6a51, 03fdf36bf9, fdcc32beda, bf20355c5e, 0136c793b4, 2e12114350, f25ab42ebb,
d3a8a278e6, 8d9827c55f, cad63f9761, bf446f44f9, 621f607297, d89bd707a8, 754087b990, cfbeb56371,
3bb46ce496, c5832f2b30, d9406b0095, 2475c36a75, c384f9c0ca, afbfebf659, 6b686c18f7, 349cb33fbd,
d7542b6818, 7976d39d9d, 5ee9676941, 4b40cda910, 4689ed7b30, 084bc2aee3, 6d7e15b2fd, 61515160a7,
a25bfdd16d, e93538cea9, b4244b28b6, 43f9038325, 27872f476e, 339044f8aa, 0718a090e1, 9e1f030a80,
04922f6aa0, 7d2bc9e162, c6c00729e3, 10756b0920, 1eb1502a07, 30e72a96a9, 2646db78a4, f5358b13f5,
d156170971, d9bfe847db, 473f8b8e31, aeb4b4c8a5, 980a3e45db, 5794969f5b, 8b5b06c3d1, b50c27b619,
5ee04e31e5, bf6ae91a6d, 828e3a5795, 7b5bcd45f8, 72de16fb86, 0b903fc5f4, 4df686f49e, d7eeaaf249,
84fb6aaddb, a744b9437a, 6027b969f5, 93805a5d7b, 71da961ecd, dd421809e5, 8526055bb7, a79334ea4c,
274ea9a4f2, 8743d18aca, d3773a433a, 0f0a87becf, 4b57bb8eeb, 3b27dbb0aa, ff2fbd322e, 9636f33fdb,
bbe2a1b264, 79fdfd6524, d086a99e5b, 22b0b95209, 28d1588e73, f3b1a5ff3e, 330e90a6ac, 8fac72db53,
820c8b0dce, 8b4a6f2a64, ef63342e20, 89840790e7, a72809b225, 9976e4736e, dc92f07232, 3db815b969,
ade293cf52, 877408b808, 86ed75bf7c, 20d8d800f3, 7ce06b3808, 08ca47cadb, 0bd3a26051, 5272b465cc,
b75f38033b, 637f655b6f, b3f7394c06, 1a5ecd4d4a, bd65c4e312, bce656c787, 06522c9ac0, 9026cc8d42,
574b040142, 48113b7bd9, c13f115473, 1e20f9f1d8, bc461d9baa, 5016e30cf2, f42ac5f2c0, 2a60414031,
9a2a304860, feb74a5e86, c0e350b734, bef1183c49, f935f5cf46, 07388d327f, 4de16b2d17, da068a43c1,
9657463717, 69036cc6a4, 700e084101, a1dc47b826, 86de0ca17b, 80414f8452, fc0e239bdf, 928ad6c1d8,
9d027b96d8, ddd49596ba, b8cabadd43, ce42b07a80, bfd93e5b13, a797459560, 6cbb683f99, 92bbb98d48,
834c847746, 97aa407fe4, 86a254ad9e, 64c38856cc, b4f6206eda, 82f828a327, d8116a80df, e0aec8d373,
1ce2587330, 20964ac2d8, 71a10e0378, 9bf13b7872, d420992f8c, c259a0e3e2, 432be274ba, 484bf5b703,
979b6305af, 4bf32af60e, 0e4a746eeb, 2fe919cc5e, bcd750695f, 19b6bb0fd6, 60f6a350be, f571df7367,
de51bc782e, c5aef60bd7, 8b07ecb937, 6f52104324, 1d7f704754
@@ -9,10 +9,6 @@ data
.idea
*.iml

# Git files
.git
.gitignore

# Documentation
*.md
@@ -4,12 +4,23 @@
## Main data folder
# DATA_FOLDER=data

## Individual folders, these override %DATA_FOLDER%
## Database URL
## When using SQLite, this is the path to the DB file, defaults to %DATA_FOLDER%/db.sqlite3
## When using MySQL, this is the URL to the DB, including username and password:
## Format: mysql://[user[:password]@]host/database_name
# DATABASE_URL=data/db.sqlite3

## Individual folders, these override %DATA_FOLDER%
# RSA_KEY_FILENAME=data/rsa_key
# ICON_CACHE_FOLDER=data/icon_cache
# ATTACHMENTS_FOLDER=data/attachments

## Templates data folder, by default uses embedded templates
## Check source code to see the format
# TEMPLATES_FOLDER=/path/to/templates
## Automatically reload the templates for every request, slow, use only for development
# RELOAD_TEMPLATES=false

## Cache time-to-live for successfully obtained icons, in seconds (0 is "forever")
# ICON_CACHE_TTL=2592000
## Cache time-to-live for icons which weren't available, in seconds (0 is "forever")
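The fallback described above (an unset DATABASE_URL resolving to %DATA_FOLDER%/db.sqlite3) can be pictured with a minimal Rust sketch. The helper below is a hypothetical illustration using std::env, not the project's actual config code:

```rust
use std::env;

// Hypothetical helper: an explicit DATABASE_URL wins, otherwise fall
// back to <DATA_FOLDER>/db.sqlite3, with DATA_FOLDER itself defaulting
// to "data" as in the template above.
fn database_url() -> String {
    env::var("DATABASE_URL").unwrap_or_else(|_| {
        let data_folder = env::var("DATA_FOLDER").unwrap_or_else(|_| "data".to_string());
        format!("{}/db.sqlite3", data_folder)
    })
}

fn main() {
    // Prints "data/db.sqlite3" when nothing is set; a MySQL deployment
    // would set e.g. DATABASE_URL=mysql://user:password@host/database_name.
    println!("{}", database_url());
}
```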
@@ -19,6 +30,9 @@
# WEB_VAULT_FOLDER=web-vault/
# WEB_VAULT_ENABLED=true

## Enables websocket notifications
# WEBSOCKET_ENABLED=false

## Controls the WebSocket server address and port
# WEBSOCKET_ADDRESS=0.0.0.0
# WEBSOCKET_PORT=3012
@@ -26,7 +40,7 @@
## Enable extended logging
## This shows timestamps and allows logging to file and to syslog
### To enable logging to file, use the LOG_FILE env variable
### To enable syslog, you need to compile with `cargo build --features=enable_syslog'
### To enable syslog, use the USE_SYSLOG env variable
# EXTENDED_LOGGING=true

## Logging to file
@@ -34,11 +48,45 @@
## It's recommended to also set 'ROCKET_CLI_COLORS=off'
# LOG_FILE=/path/to/log

## Use a local favicon extractor
## Set to false to use bitwarden's official icon servers
## Set to true to use the local version, which is not as smart,
## but it doesn't send the cipher domains to bitwarden's servers
# LOCAL_ICON_EXTRACTOR=false
## Logging to Syslog
## This requires extended logging
## It's recommended to also set 'ROCKET_CLI_COLORS=off'
# USE_SYSLOG=false

## Log level
## Change the verbosity of the log output
## Valid values are "trace", "debug", "info", "warn", "error" and "off"
## This requires extended logging
# LOG_LEVEL=Info

## Enable WAL for the DB
## Set to false to avoid enabling WAL during startup.
## Note that if the DB already has WAL enabled, you will also need to disable WAL in the DB;
## this setting only prevents bitwarden_rs from automatically enabling it on start.
## Please read the project wiki page about this setting first before changing the value, as it can
## cause performance degradation or might render the service unable to start.
# ENABLE_DB_WAL=true

## Disable icon downloading
## Set to true to disable icon downloading; this would still serve icons from $ICON_CACHE_FOLDER,
## but it won't produce any external network request. You also need to set $ICON_CACHE_TTL to 0,
## otherwise it will delete them and they won't be downloaded again.
# DISABLE_ICON_DOWNLOAD=false

## Icon download timeout
## Configure the timeout value when downloading the favicons.
## The default is 10 seconds, but this could be too low on slower network connections
# ICON_DOWNLOAD_TIMEOUT=10

## Icon blacklist Regex
## Any domains or IPs that match this regex won't be fetched by the icon service.
## Useful to hide other servers in the local network. Check the WIKI for more details
# ICON_BLACKLIST_REGEX=192\.168\.1\.[0-9].*^

## Disable 2FA remember
## Enabling this would force the users to use a second factor to login every time.
## Note that the checkbox would still be present, but ignored.
# DISABLE_2FA_REMEMBER=false

## Controls if new users can register
# SIGNUPS_ALLOWED=true
@@ -47,6 +95,7 @@
## One option is to use 'openssl rand -base64 48'
## If not set, the admin panel is disabled
# ADMIN_TOKEN=Vy2VyYTTsKPv8W5aEOWUbB/Bt3DEKePbHmI4m9VcemUMS2rEviDowNAFqYi1xjmp
# DISABLE_ADMIN_TOKEN=false

## Allows org admins to invite users, even when signups are disabled
# INVITATIONS_ALLOWED=true
@@ -60,7 +109,8 @@

## Domain settings
## The domain must match the address from where you access the server
## Unless you are using U2F, or having problems with attachments not downloading, there is no need to change this
## It's recommended to configure this value, otherwise certain functionality might not work,
## like attachment downloads, email links and U2F.
## For U2F to work, the server must use HTTPS; you can use Let's Encrypt for free certs
# DOMAIN=https://bw.domain.tld:8443

@@ -72,6 +122,17 @@
# YUBICO_SECRET_KEY=AAAAAAAAAAAAAAAAAAAAAAAA
# YUBICO_SERVER=http://yourdomain.com/wsapi/2.0/verify

## Duo Settings
## You need to configure all options to enable global Duo support, otherwise users would need to configure it themselves
## Create an account and protect an application as mentioned in this link (only the first step, not the rest):
## https://help.bitwarden.com/article/setup-two-step-login-duo/#create-a-duo-security-account
## Then set the following options, based on the values obtained from the last step:
# DUO_IKEY=<Integration Key>
# DUO_SKEY=<Secret Key>
# DUO_HOST=<API Hostname>
## After that, you should be able to follow the rest of the guide linked above,
## ignoring the fields that ask for the values that you already configured beforehand.

## Rocket specific settings, check Rocket documentation to learn more
# ROCKET_ENV=staging
# ROCKET_ADDRESS=0.0.0.0 # Enable this to test mobile app
@@ -79,10 +140,13 @@
# ROCKET_TLS={certs="/path/to/certs.pem",key="/path/to/key.pem"}

## Mail specific settings, set SMTP_HOST and SMTP_FROM to enable the mail service.
## To make sure the email links are pointing to the correct host, set the DOMAIN variable.
## Note: if SMTP_USERNAME is specified, SMTP_PASSWORD is mandatory
# SMTP_HOST=smtp.domain.tld
# SMTP_FROM=bitwarden-rs@domain.tld
# SMTP_FROM_NAME=Bitwarden_RS
# SMTP_PORT=587
# SMTP_SSL=true
# SMTP_USERNAME=username
# SMTP_PASSWORD=password
# SMTP_AUTH_MECHANISM="Plain"
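The note above makes SMTP_PASSWORD mandatory whenever SMTP_USERNAME is set. A minimal Rust sketch of such a check (a hypothetical helper, not the project's actual mail config code):

```rust
use std::env;

// Hypothetical validation of the constraint noted above: if
// SMTP_USERNAME is set, SMTP_PASSWORD must be set too.
fn validate_smtp_auth() -> Result<(), String> {
    match (env::var("SMTP_USERNAME").ok(), env::var("SMTP_PASSWORD").ok()) {
        (Some(_), None) => Err("SMTP_USERNAME is set but SMTP_PASSWORD is missing".to_string()),
        _ => Ok(()),
    }
}

fn main() {
    if let Err(e) = validate_smtp_auth() {
        eprintln!("invalid mail config: {}", e);
    }
}
```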
.hadolint.yaml · 7 · Normal file
@@ -0,0 +1,7 @@
ignored:
  # disable explicit version for apt install
  - DL3008
  # disable explicit version for apk install
  - DL3018
trustedRegistries:
  - docker.io
.travis.yml · 23
@@ -1,9 +1,20 @@
# Copied from Rocket's .travis.yml
dist: xenial

env:
  global:
    - HADOLINT_VERSION=1.17.1

language: rust
sudo: required # so we get a VM with higher specs
dist: trusty # so we get a VM with higher specs
rust: nightly
cache: cargo
rust:
  - nightly

before_install:
  - sudo curl -L https://github.com/hadolint/hadolint/releases/download/v$HADOLINT_VERSION/hadolint-$(uname -s)-$(uname -m) -o /usr/local/bin/hadolint
  - sudo chmod +rx /usr/local/bin/hadolint

# Nothing to install
install: true
script:
  - cargo build --verbose --all-features
  - git ls-files --exclude='Dockerfile*' --ignored | xargs --max-lines=1 hadolint
  - cargo build --features "sqlite"
  - cargo build --features "mysql"
Cargo.lock · 2585 · generated
File diff suppressed because it is too large
Cargo.toml · 90
@@ -11,55 +11,58 @@ publish = false
build = "build.rs"

[features]
default = ["enable_yubikey"]
enable_syslog = ["syslog", "fern/syslog-4"]
enable_yubikey = ["yubico"]
# Empty to keep compatibility, prefer to set USE_SYSLOG=true
enable_syslog = []
mysql = ["diesel/mysql", "diesel_migrations/mysql"]
sqlite = ["diesel/sqlite", "diesel_migrations/sqlite", "libsqlite3-sys"]

[target."cfg(not(windows))".dependencies]
syslog = "4.0.1"

[dependencies]
# Web framework for nightly with a focus on ease-of-use, expressibility, and speed.
rocket = { version = "0.4.0", features = ["tls"], default-features = false }
rocket_contrib = "0.4.0"
rocket = { version = "0.5.0-dev", features = ["tls"], default-features = false }
rocket_contrib = "0.5.0-dev"

# HTTP client
reqwest = "0.9.6"
reqwest = "0.9.19"

# multipart/form-data support
multipart = "0.15.4"
multipart = { version = "0.16.1", features = ["server"], default-features = false }

# WebSockets library
ws = "0.7.9"
ws = "0.9.0"

# MessagePack library
rmpv = "0.4.0"

# Concurrent hashmap implementation
chashmap = "2.2.0"
chashmap = "2.2.2"

# A generic serialization/deserialization framework
serde = "1.0.84"
serde_derive = "1.0.84"
serde_json = "1.0.34"
serde = "1.0.99"
serde_derive = "1.0.99"
serde_json = "1.0.40"

# Logging
log = "0.4.6"
fern = "0.5.7"
syslog = { version = "4.0.1", optional = true }
log = "0.4.8"
fern = { version = "0.5.8", features = ["syslog-4"] }

# A safe, extensible ORM and Query builder
diesel = { version = "1.3.3", features = ["sqlite", "chrono", "r2d2"] }
diesel_migrations = { version = "1.3.0", features = ["sqlite"] }
diesel = { version = "1.4.2", features = [ "chrono", "r2d2"] }
diesel_migrations = "1.4.0"

# Bundled SQLite
libsqlite3-sys = { version = "0.9.3", features = ["bundled"] }
# Bundled SQLite
libsqlite3-sys = { version = "0.12.0", features = ["bundled"], optional = true }

# Crypto library
ring = { version = "0.13.5", features = ["rsa_signing"] }
ring = "0.14.6"

# UUID generation
uuid = { version = "0.7.1", features = ["v4"] }
uuid = { version = "0.7.4", features = ["v4"] }

# Date and time library for Rust
chrono = "0.4.6"
chrono = "0.4.7"

# TOTP library
oath = "0.10.2"
@@ -68,42 +71,47 @@ oath = "0.10.2"
data-encoding = "2.1.2"

# JWT library
jsonwebtoken = "5.0.1"
jsonwebtoken = "6.0.1"

# U2F library
u2f = "0.1.4"
u2f = "0.1.6"

# Yubico Library
yubico = { version = "=0.4.0", features = ["online"], default-features = false, optional = true }
yubico = { version = "0.6.1", features = ["online", "online-tokio"], default-features = false }

# A `dotenv` implementation for Rust
dotenv = { version = "0.13.0", default-features = false }
dotenv = { version = "0.14.1", default-features = false }

# Lazy static macro
lazy_static = { version = "1.2.0", features = ["nightly"] }
lazy_static = "1.3.0"

# More derives
derive_more = "0.13.0"
derive_more = "0.15.0"

# Numerical libraries
num-traits = "0.2.6"
num-derive = "0.2.3"
num-traits = "0.2.8"
num-derive = "0.2.5"

# Email libraries
lettre = "0.9.0"
lettre_email = "0.9.0"
native-tls = "0.2.2"
lettre = "0.9.2"
lettre_email = "0.9.2"
native-tls = "0.2.3"
quoted_printable = "0.4.1"

# Number encoding library
byteorder = "1.2.7"
# Template library
handlebars = "2.0.1"

# For favicon extraction from main website
soup = "0.4.1"
regex = "1.2.1"

# URL encoding library
percent-encoding = "2.1.0"

[patch.crates-io]
# Add support for Timestamp type
rmp = { git = 'https://github.com/dani-garcia/msgpack-rust' }

# Use new native_tls version 0.2
lettre = { git = 'https://github.com/lettre/lettre', rev = 'c988b1760ad81' }
lettre_email = { git = 'https://github.com/lettre/lettre', rev = 'c988b1760ad81' }

# Allows optional libusb support
yubico = { git = 'https://github.com/dani-garcia/yubico-rs' }
# Use newest ring
rocket = { git = 'https://github.com/SergioBenitez/Rocket', rev = 'dbcb0a75b9556763ac3ab708f40c8f8ed75f1a1e' }
rocket_contrib = { git = 'https://github.com/SergioBenitez/Rocket', rev = 'dbcb0a75b9556763ac3ab708f40c8f8ed75f1a1e' }
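The new `sqlite` and `mysql` features above are mutually exclusive, so downstream code typically branches on them with cfg attributes. A minimal sketch of that pattern (a hypothetical standalone crate declaring both features, not the project's actual module layout):

```rust
// Hypothetical sketch of feature-gated backend selection, mirroring the
// mutually exclusive `sqlite` / `mysql` cargo features declared above.
#[cfg(feature = "sqlite")]
const BACKEND: &str = "sqlite";

#[cfg(feature = "mysql")]
const BACKEND: &str = "mysql";

fn main() {
    // Built with `cargo build --features sqlite` this prints "sqlite".
    println!("compiled with the {} backend", BACKEND);
}
```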
Dockerfile · 86
@@ -1,86 +0,0 @@
# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
FROM alpine as vault

ENV VAULT_VERSION "v2.8.0b"

ENV URL "https://github.com/dani-garcia/bw_web_builds/releases/download/$VAULT_VERSION/bw_web_$VAULT_VERSION.tar.gz"

RUN apk add --update-cache --upgrade \
    curl \
    tar

RUN mkdir /web-vault
WORKDIR /web-vault

RUN curl -L $URL | tar xz
RUN ls

########################## BUILD IMAGE ##########################
# We need to use the Rust build image, because
# we need the Rust compiler and Cargo tooling
FROM rust as build

# Using bundled SQLite, no need to install it
# RUN apt-get update && apt-get install -y\
#     sqlite3\
#     --no-install-recommends\
#     && rm -rf /var/lib/apt/lists/*

# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin app
WORKDIR /app

# Copies over *only* your manifests and build files
COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs

# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN cargo build --release
RUN find . -not -path "./target*" -delete

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .

# Make sure that we actually build the project
RUN touch src/main.rs

# Builds again, this time it'll just be
# your actual source files being built
RUN cargo build --release

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM debian:stretch-slim

ENV ROCKET_ENV "staging"
ENV ROCKET_PORT=80
ENV ROCKET_WORKERS=10

# Install needed libraries
RUN apt-get update && apt-get install -y\
    openssl\
    ca-certificates\
    --no-install-recommends\
    && rm -rf /var/lib/apt/lists/*

RUN mkdir /data
VOLUME /data
EXPOSE 80
EXPOSE 3012

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
COPY Rocket.toml .
COPY --from=vault /web-vault ./web-vault
COPY --from=build app/target/release/bitwarden_rs .

# Configures the startup!
CMD ./bitwarden_rs
Dockerfile · 1 · Symbolic link
@@ -0,0 +1 @@
docker/amd64/sqlite/Dockerfile
README.md · 10
@@ -3,7 +3,7 @@
---

[](https://travis-ci.org/dani-garcia/bitwarden_rs)
[](https://hub.docker.com/r/mprasil/bitwarden)
[](https://hub.docker.com/r/bitwardenrs/server)
[](https://deps.rs/repo/github/dani-garcia/bitwarden_rs)
[](https://github.com/dani-garcia/bitwarden_rs/releases/latest)
[](https://github.com/dani-garcia/bitwarden_rs/blob/master/LICENSE.txt)
@@ -11,7 +11,9 @@

Image is based on [Rust implementation of Bitwarden API](https://github.com/dani-garcia/bitwarden_rs).

_*Note, that this project is not associated with the [Bitwarden](https://bitwarden.com/) project nor 8bit Solutions LLC._
**This project is not associated with the [Bitwarden](https://bitwarden.com/) project nor 8bit Solutions LLC.**

#### ⚠️**IMPORTANT**⚠️: When using this server, please report any Bitwarden related bug-reports or suggestions [here](https://github.com/dani-garcia/bitwarden_rs/issues/new), regardless of whatever clients you are using (mobile, desktop, browser...). DO NOT use the official support channels.

---

@@ -32,8 +34,8 @@ Basically full implementation of Bitwarden API is provided including:
Pull the docker image and mount a volume from the host for persistent storage:

```sh
docker pull mprasil/bitwarden:latest
docker run -d --name bitwarden -v /bw-data/:/data/ -p 80:80 mprasil/bitwarden:latest
docker pull bitwardenrs/server:latest
docker run -d --name bitwarden -v /bw-data/:/data/ -p 80:80 bitwardenrs/server:latest
```
This will preserve any persistent data under /bw-data/; you can adapt the path to whatever suits you.
azure-pipelines.yml · 25 · Normal file
@@ -0,0 +1,25 @@
pool:
  vmImage: 'Ubuntu-16.04'

steps:
- script: |
    ls -la
    curl https://sh.rustup.rs -sSf | sh -s -- -y --default-toolchain $(cat rust-toolchain)
    echo "##vso[task.prependpath]$HOME/.cargo/bin"
  displayName: 'Install Rust'

- script: |
    sudo apt-get update
    sudo apt-get install -y libmysql++-dev
  displayName: Install libmysql

- script: |
    rustc -Vv
    cargo -V
  displayName: Query rust and cargo versions

- script : cargo build --features "sqlite"
  displayName: 'Build project with sqlite backend'

- script : cargo build --features "mysql"
  displayName: 'Build project with mysql backend'
build.rs · 32
@@ -1,11 +1,21 @@
use std::process::Command;

fn main() {
    read_git_info().expect("Unable to read Git info");
    #[cfg(all(feature = "sqlite", feature = "mysql"))]
    compile_error!("Can't enable both backends");

    #[cfg(not(any(feature = "sqlite", feature = "mysql")))]
    compile_error!("You need to enable one DB backend. To build with previous defaults do: cargo build --features sqlite");

    read_git_info().ok();
}

fn run(args: &[&str]) -> Result<String, std::io::Error> {
    let out = Command::new(args[0]).args(&args[1..]).output()?;
    if !out.status.success() {
        use std::io::{Error, ErrorKind};
        return Err(Error::new(ErrorKind::Other, "Command not successful"));
    }
    Ok(String::from_utf8(out.stdout).unwrap().trim().to_string())
}

@@ -13,8 +23,10 @@ fn run(args: &[&str]) -> Result<String, std::io::Error> {
fn read_git_info() -> Result<(), std::io::Error> {
    // The exact tag for the current commit, can be empty when
    // the current commit doesn't have an associated tag
    let exact_tag = run(&["git", "describe", "--abbrev=0", "--tags", "--exact-match"])?;
    println!("cargo:rustc-env=GIT_EXACT_TAG={}", exact_tag);
    let exact_tag = run(&["git", "describe", "--abbrev=0", "--tags", "--exact-match"]).ok();
    if let Some(ref exact) = exact_tag {
        println!("cargo:rustc-env=GIT_EXACT_TAG={}", exact);
    }

    // The last available tag, equal to exact_tag when
    // the current commit is tagged
@@ -27,13 +39,25 @@ fn read_git_info() -> Result<(), std::io::Error> {

    // The current git commit hash
    let rev = run(&["git", "rev-parse", "HEAD"])?;
    let rev_short = rev.get(..12).unwrap_or_default();
    let rev_short = rev.get(..8).unwrap_or_default();
    println!("cargo:rustc-env=GIT_REV={}", rev_short);

    // Combined version
    let version = if let Some(exact) = exact_tag {
        exact
    } else if &branch != "master" {
        format!("{}-{} ({})", last_tag, rev_short, branch)
    } else {
        format!("{}-{}", last_tag, rev_short)
    };
    println!("cargo:rustc-env=GIT_VERSION={}", version);

    // To access these values, use:
    // env!("GIT_EXACT_TAG")
    // env!("GIT_LAST_TAG")
    // env!("GIT_BRANCH")
    // env!("GIT_REV")
    // env!("GIT_VERSION")

    Ok(())
}
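The environment variables emitted above are read back at compile time in the main crate. A minimal sketch of that consumption (a hypothetical standalone example; it uses option_env! so it also compiles when the build script set nothing):

```rust
// Hypothetical consumer of the cargo:rustc-env values emitted by the
// build script above. option_env! yields None when a variable was not
// set (e.g. GIT_EXACT_TAG on an untagged commit), while env! would
// fail the build instead.
fn main() {
    let version = option_env!("GIT_VERSION").unwrap_or("unknown");
    let exact_tag = option_env!("GIT_EXACT_TAG");
    println!("version: {} (exact tag: {:?})", version, exact_tag);
}
```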
@@ -2,29 +2,35 @@
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
FROM alpine as vault
FROM alpine:3.10 as vault

ENV VAULT_VERSION "v2.8.0b"
ENV VAULT_VERSION "v2.11.0"

ENV URL "https://github.com/dani-garcia/bw_web_builds/releases/download/$VAULT_VERSION/bw_web_$VAULT_VERSION.tar.gz"

RUN apk add --update-cache --upgrade \
RUN apk add --no-cache --upgrade \
    curl \
    tar

RUN mkdir /web-vault
WORKDIR /web-vault

SHELL ["/bin/ash", "-eo", "pipefail", "-c"]

RUN curl -L $URL | tar xz
RUN ls

########################## BUILD IMAGE ##########################
# We need to use the Rust build image, because
# we need the Rust compiler and Cargo tooling
FROM rust as build
FROM rust:1.36 as build

# set mysql backend
ARG DB=mysql

RUN apt-get update \
    && apt-get install -y \
    --no-install-recommends \
    gcc-aarch64-linux-gnu \
    && mkdir -p ~/.cargo \
    && echo '[target.aarch64-unknown-linux-gnu]' >> ~/.cargo/config \
@@ -41,8 +47,10 @@ RUN sed 's/^deb/deb-src/' /etc/apt/sources.list > \
    && dpkg --add-architecture arm64 \
    && apt-get update \
    && apt-get install -y \
    --no-install-recommends \
    libssl-dev:arm64 \
    libc6-dev:arm64
    libc6-dev:arm64 \
    libmariadb-dev:arm64

ENV CC_aarch64_unknown_linux_gnu="/usr/bin/aarch64-linux-gnu-gcc"
ENV CROSS_COMPILE="1"
@@ -55,8 +63,7 @@ COPY . .

# Build
RUN rustup target add aarch64-unknown-linux-gnu
# TODO: Enable yubico when #262 is fixed
RUN cargo build --release --target=aarch64-unknown-linux-gnu -v --no-default-features
RUN cargo build --features ${DB} --release --target=aarch64-unknown-linux-gnu -v

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
@@ -70,10 +77,11 @@ ENV ROCKET_WORKERS=10
RUN [ "cross-build-start" ]

# Install needed libraries
RUN apt-get update && apt-get install -y\
    openssl\
    ca-certificates\
    --no-install-recommends\
RUN apt-get update && apt-get install -y \
    --no-install-recommends \
    openssl \
    ca-certificates \
    libmariadbclient-dev \
    && rm -rf /var/lib/apt/lists/*

RUN mkdir /data

@@ -90,4 +98,4 @@ COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/aarch64-unknown-linux-gnu/release/bitwarden_rs .

# Configures the startup!
CMD ./bitwarden_rs
CMD ["./bitwarden_rs"]
docker/aarch64/sqlite/Dockerfile · 101 · Normal file
@@ -0,0 +1,101 @@
# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
FROM alpine:3.10 as vault

ENV VAULT_VERSION "v2.11.0"

ENV URL "https://github.com/dani-garcia/bw_web_builds/releases/download/$VAULT_VERSION/bw_web_$VAULT_VERSION.tar.gz"

RUN apk add --no-cache --upgrade \
    curl \
    tar

RUN mkdir /web-vault
WORKDIR /web-vault

SHELL ["/bin/ash", "-eo", "pipefail", "-c"]

RUN curl -L $URL | tar xz
RUN ls

########################## BUILD IMAGE ##########################
# We need to use the Rust build image, because
# we need the Rust compiler and Cargo tooling
FROM rust:1.36 as build

# set sqlite as default for DB ARG for backward compatibility
ARG DB=sqlite

RUN apt-get update \
    && apt-get install -y \
    --no-install-recommends \
    gcc-aarch64-linux-gnu \
    && mkdir -p ~/.cargo \
    && echo '[target.aarch64-unknown-linux-gnu]' >> ~/.cargo/config \
    && echo 'linker = "aarch64-linux-gnu-gcc"' >> ~/.cargo/config

ENV CARGO_HOME "/root/.cargo"
ENV USER "root"

WORKDIR /app

# Prepare openssl arm64 libs
RUN sed 's/^deb/deb-src/' /etc/apt/sources.list > \
    /etc/apt/sources.list.d/deb-src.list \
    && dpkg --add-architecture arm64 \
    && apt-get update \
    && apt-get install -y \
    --no-install-recommends \
    libssl-dev:arm64 \
    libc6-dev:arm64 \
    libmariadb-dev:arm64

ENV CC_aarch64_unknown_linux_gnu="/usr/bin/aarch64-linux-gnu-gcc"
ENV CROSS_COMPILE="1"
ENV OPENSSL_INCLUDE_DIR="/usr/include/aarch64-linux-gnu"
ENV OPENSSL_LIB_DIR="/usr/lib/aarch64-linux-gnu"

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .

# Build
RUN rustup target add aarch64-unknown-linux-gnu
RUN cargo build --features ${DB} --release --target=aarch64-unknown-linux-gnu -v

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM balenalib/aarch64-debian:stretch

ENV ROCKET_ENV "staging"
ENV ROCKET_PORT=80
ENV ROCKET_WORKERS=10

RUN [ "cross-build-start" ]

# Install needed libraries
RUN apt-get update && apt-get install -y \
    --no-install-recommends \
    openssl \
    ca-certificates \
    libmariadbclient-dev \
    && rm -rf /var/lib/apt/lists/*

RUN mkdir /data

RUN [ "cross-build-end" ]

VOLUME /data
EXPOSE 80

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
COPY Rocket.toml .
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/aarch64-unknown-linux-gnu/release/bitwarden_rs .

# Configures the startup!
CMD ["./bitwarden_rs"]
docker/amd64/mysql/Dockerfile · 98 · Normal file
@@ -0,0 +1,98 @@
# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
FROM alpine:3.10 as vault

ENV VAULT_VERSION "v2.11.0"

ENV URL "https://github.com/dani-garcia/bw_web_builds/releases/download/$VAULT_VERSION/bw_web_$VAULT_VERSION.tar.gz"

RUN apk add --no-cache --upgrade \
    curl \
    tar

RUN mkdir /web-vault
WORKDIR /web-vault

SHELL ["/bin/ash", "-eo", "pipefail", "-c"]

RUN curl -L $URL | tar xz
RUN ls

########################## BUILD IMAGE ##########################
# We need to use the Rust build image, because
# we need the Rust compiler and Cargo tooling
FROM rust:1.36 as build

# set mysql backend
ARG DB=mysql

# Using bundled SQLite, no need to install it
# RUN apt-get update && apt-get install -y\
#     --no-install-recommends \
#     sqlite3\
#     && rm -rf /var/lib/apt/lists/*

# Install MySQL package
RUN apt-get update && apt-get install -y \
    --no-install-recommends \
    libmariadb-dev \
    && rm -rf /var/lib/apt/lists/*

# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin app
WORKDIR /app

# Copies over *only* your manifests and build files
COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs

# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN cargo build --features ${DB} --release
RUN find . -not -path "./target*" -delete

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .

# Make sure that we actually build the project
RUN touch src/main.rs

# Builds again, this time it'll just be
# your actual source files being built
RUN cargo build --features ${DB} --release

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM debian:stretch-slim

ENV ROCKET_ENV "staging"
ENV ROCKET_PORT=80
ENV ROCKET_WORKERS=10

# Install needed libraries
RUN apt-get update && apt-get install -y \
    --no-install-recommends \
    openssl \
    ca-certificates \
    libmariadbclient-dev \
    && rm -rf /var/lib/apt/lists/*

RUN mkdir /data
VOLUME /data
EXPOSE 80
EXPOSE 3012

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
COPY Rocket.toml .
COPY --from=vault /web-vault ./web-vault
COPY --from=build app/target/release/bitwarden_rs .

# Configures the startup!
CMD ["./bitwarden_rs"]
@@ -2,28 +2,39 @@
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
FROM alpine as vault
FROM alpine:3.10 as vault

ENV VAULT_VERSION "v2.8.0b"
ENV VAULT_VERSION "v2.11.0"

ENV URL "https://github.com/dani-garcia/bw_web_builds/releases/download/$VAULT_VERSION/bw_web_$VAULT_VERSION.tar.gz"

RUN apk add --update-cache --upgrade \
RUN apk add --no-cache --upgrade \
    curl \
    tar

RUN mkdir /web-vault
WORKDIR /web-vault

SHELL ["/bin/ash", "-eo", "pipefail", "-c"]

RUN curl -L $URL | tar xz
RUN ls

########################## BUILD IMAGE ##########################
# Musl build image for statically compiled binary
FROM clux/muslrust:nightly-2018-12-01 as build
FROM clux/muslrust:nightly-2019-07-08 as build

# set mysql backend
ARG DB=mysql

ENV USER "root"

# Install needed libraries
RUN apt-get update && apt-get install -y \
    --no-install-recommends \
    libmysqlclient-dev \
    && rm -rf /var/lib/apt/lists/*

WORKDIR /app

# Copies the complete project
@@ -32,13 +43,16 @@ COPY . .

RUN rustup target add x86_64-unknown-linux-musl

# Make sure that we actually build the project
RUN touch src/main.rs

# Build
RUN cargo build --release
RUN cargo build --features ${DB} --release

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM alpine:3.8
FROM alpine:3.10

ENV ROCKET_ENV "staging"
ENV ROCKET_PORT=80
@@ -46,10 +60,10 @@ ENV ROCKET_WORKERS=10
ENV SSL_CERT_DIR=/etc/ssl/certs

# Install needed libraries
RUN apk add \
    openssl\
    ca-certificates \
    && rm /var/cache/apk/*
RUN apk add --no-cache \
    openssl \
    mariadb-connector-c \
    ca-certificates

RUN mkdir /data
VOLUME /data
@@ -63,4 +77,4 @@ COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/x86_64-unknown-linux-musl/release/bitwarden_rs .

# Configures the startup!
CMD ./bitwarden_rs
CMD ["./bitwarden_rs"]
docker/amd64/sqlite/Dockerfile · 98 · Normal file
@@ -0,0 +1,98 @@
# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
FROM alpine:3.10 as vault

ENV VAULT_VERSION "v2.11.0"

ENV URL "https://github.com/dani-garcia/bw_web_builds/releases/download/$VAULT_VERSION/bw_web_$VAULT_VERSION.tar.gz"

RUN apk add --no-cache --upgrade \
    curl \
    tar

RUN mkdir /web-vault
WORKDIR /web-vault

SHELL ["/bin/ash", "-eo", "pipefail", "-c"]

RUN curl -L $URL | tar xz
RUN ls

########################## BUILD IMAGE ##########################
# We need to use the Rust build image, because
# we need the Rust compiler and Cargo tooling
FROM rust:1.36 as build

# set sqlite as default for DB ARG for backward compatibility
ARG DB=sqlite

# Using bundled SQLite, no need to install it
# RUN apt-get update && apt-get install -y\
#     --no-install-recommends \
#     sqlite3 \
#     && rm -rf /var/lib/apt/lists/*

# Install MySQL package
RUN apt-get update && apt-get install -y \
    --no-install-recommends \
    libmariadb-dev \
    && rm -rf /var/lib/apt/lists/*

# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin app
WORKDIR /app

# Copies over *only* your manifests and build files
COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs

# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN cargo build --features ${DB} --release
RUN find . -not -path "./target*" -delete

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .

# Make sure that we actually build the project
RUN touch src/main.rs

# Builds again, this time it'll just be
# your actual source files being built
RUN cargo build --features ${DB} --release

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM debian:stretch-slim

ENV ROCKET_ENV "staging"
ENV ROCKET_PORT=80
ENV ROCKET_WORKERS=10

# Install needed libraries
RUN apt-get update && apt-get install -y \
    --no-install-recommends \
    openssl \
    ca-certificates \
    libmariadbclient-dev \
    && rm -rf /var/lib/apt/lists/*

RUN mkdir /data
VOLUME /data
EXPOSE 80
EXPOSE 3012

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
COPY Rocket.toml .
COPY --from=vault /web-vault ./web-vault
COPY --from=build app/target/release/bitwarden_rs .

# Configures the startup!
CMD ["./bitwarden_rs"]
docker/amd64/sqlite/Dockerfile.alpine · 80 · Normal file
@@ -0,0 +1,80 @@
# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
FROM alpine:3.10 as vault

ENV VAULT_VERSION "v2.11.0"

ENV URL "https://github.com/dani-garcia/bw_web_builds/releases/download/$VAULT_VERSION/bw_web_$VAULT_VERSION.tar.gz"

RUN apk add --no-cache --upgrade \
    curl \
    tar

RUN mkdir /web-vault
WORKDIR /web-vault

SHELL ["/bin/ash", "-eo", "pipefail", "-c"]

RUN curl -L $URL | tar xz
RUN ls

########################## BUILD IMAGE ##########################
# Musl build image for statically compiled binary
FROM clux/muslrust:nightly-2019-07-08 as build

# set sqlite as default for DB ARG for backward compatibility
ARG DB=sqlite

ENV USER "root"

# Install needed libraries
RUN apt-get update && apt-get install -y \
    --no-install-recommends \
    libmysqlclient-dev \
    && rm -rf /var/lib/apt/lists/*

WORKDIR /app

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .

RUN rustup target add x86_64-unknown-linux-musl

# Make sure that we actually build the project
RUN touch src/main.rs

# Build
RUN cargo build --features ${DB} --release

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM alpine:3.10

ENV ROCKET_ENV "staging"
ENV ROCKET_PORT=80
ENV ROCKET_WORKERS=10
ENV SSL_CERT_DIR=/etc/ssl/certs

# Install needed libraries
RUN apk add --no-cache \
    openssl \
    mariadb-connector-c \
    ca-certificates

RUN mkdir /data
VOLUME /data
EXPOSE 80
EXPOSE 3012

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
COPY Rocket.toml .
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/x86_64-unknown-linux-musl/release/bitwarden_rs .

# Configures the startup!
CMD ["./bitwarden_rs"]
docker/armv6/mysql/Dockerfile · 101 · Normal file
@@ -0,0 +1,101 @@
# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
FROM alpine:3.10 as vault

ENV VAULT_VERSION "v2.11.0"

ENV URL "https://github.com/dani-garcia/bw_web_builds/releases/download/$VAULT_VERSION/bw_web_$VAULT_VERSION.tar.gz"

RUN apk add --no-cache --upgrade \
    curl \
    tar

RUN mkdir /web-vault
WORKDIR /web-vault

SHELL ["/bin/ash", "-eo", "pipefail", "-c"]

RUN curl -L $URL | tar xz
RUN ls

########################## BUILD IMAGE ##########################
# We need to use the Rust build image, because
# we need the Rust compiler and Cargo tooling
FROM rust:1.36 as build

# set mysql backend
ARG DB=mysql

RUN apt-get update \
    && apt-get install -y \
    --no-install-recommends \
    gcc-arm-linux-gnueabi \
    && mkdir -p ~/.cargo \
    && echo '[target.arm-unknown-linux-gnueabi]' >> ~/.cargo/config \
    && echo 'linker = "arm-linux-gnueabi-gcc"' >> ~/.cargo/config

ENV CARGO_HOME "/root/.cargo"
ENV USER "root"

WORKDIR /app

# Prepare openssl armel libs
RUN sed 's/^deb/deb-src/' /etc/apt/sources.list > \
    /etc/apt/sources.list.d/deb-src.list \
    && dpkg --add-architecture armel \
    && apt-get update \
    && apt-get install -y \
    --no-install-recommends \
    libssl-dev:armel \
    libc6-dev:armel \
    libmariadb-dev:armel

ENV CC_arm_unknown_linux_gnueabi="/usr/bin/arm-linux-gnueabi-gcc"
ENV CROSS_COMPILE="1"
ENV OPENSSL_INCLUDE_DIR="/usr/include/arm-linux-gnueabi"
ENV OPENSSL_LIB_DIR="/usr/lib/arm-linux-gnueabi"

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .

# Build
RUN rustup target add arm-unknown-linux-gnueabi
RUN cargo build --features ${DB} --release --target=arm-unknown-linux-gnueabi -v

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM balenalib/rpi-debian:stretch

ENV ROCKET_ENV "staging"
ENV ROCKET_PORT=80
ENV ROCKET_WORKERS=10

RUN [ "cross-build-start" ]

# Install needed libraries
RUN apt-get update && apt-get install -y \
    --no-install-recommends \
    openssl \
    ca-certificates \
    libmariadbclient-dev \
    && rm -rf /var/lib/apt/lists/*

RUN mkdir /data

RUN [ "cross-build-end" ]

VOLUME /data
EXPOSE 80

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
COPY Rocket.toml .
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/arm-unknown-linux-gnueabi/release/bitwarden_rs .

# Configures the startup!
CMD ["./bitwarden_rs"]
docker/armv6/sqlite/Dockerfile · 101 · Normal file
@@ -0,0 +1,101 @@
# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
FROM alpine:3.10 as vault

ENV VAULT_VERSION "v2.11.0"

ENV URL "https://github.com/dani-garcia/bw_web_builds/releases/download/$VAULT_VERSION/bw_web_$VAULT_VERSION.tar.gz"

RUN apk add --no-cache --upgrade \
    curl \
    tar

RUN mkdir /web-vault
WORKDIR /web-vault

SHELL ["/bin/ash", "-eo", "pipefail", "-c"]

RUN curl -L $URL | tar xz
RUN ls

########################## BUILD IMAGE ##########################
# We need to use the Rust build image, because
# we need the Rust compiler and Cargo tooling
FROM rust:1.36 as build

# set sqlite as default for DB ARG for backward compatibility
ARG DB=sqlite

RUN apt-get update \
    && apt-get install -y \
    --no-install-recommends \
    gcc-arm-linux-gnueabi \
    && mkdir -p ~/.cargo \
    && echo '[target.arm-unknown-linux-gnueabi]' >> ~/.cargo/config \
    && echo 'linker = "arm-linux-gnueabi-gcc"' >> ~/.cargo/config

ENV CARGO_HOME "/root/.cargo"
ENV USER "root"

WORKDIR /app

# Prepare openssl armel libs
RUN sed 's/^deb/deb-src/' /etc/apt/sources.list > \
    /etc/apt/sources.list.d/deb-src.list \
    && dpkg --add-architecture armel \
    && apt-get update \
    && apt-get install -y \
    --no-install-recommends \
    libssl-dev:armel \
    libc6-dev:armel \
    libmariadb-dev:armel

ENV CC_arm_unknown_linux_gnueabi="/usr/bin/arm-linux-gnueabi-gcc"
ENV CROSS_COMPILE="1"
ENV OPENSSL_INCLUDE_DIR="/usr/include/arm-linux-gnueabi"
ENV OPENSSL_LIB_DIR="/usr/lib/arm-linux-gnueabi"

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .

# Build
RUN rustup target add arm-unknown-linux-gnueabi
RUN cargo build --features ${DB} --release --target=arm-unknown-linux-gnueabi -v

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM balenalib/rpi-debian:stretch

ENV ROCKET_ENV "staging"
ENV ROCKET_PORT=80
ENV ROCKET_WORKERS=10

RUN [ "cross-build-start" ]

# Install needed libraries
RUN apt-get update && apt-get install -y \
    --no-install-recommends \
    openssl \
    ca-certificates \
    libmariadbclient-dev \
    && rm -rf /var/lib/apt/lists/*

RUN mkdir /data

RUN [ "cross-build-end" ]

VOLUME /data
EXPOSE 80

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
COPY Rocket.toml .
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/arm-unknown-linux-gnueabi/release/bitwarden_rs .

# Configures the startup!
CMD ["./bitwarden_rs"]
@@ -2,29 +2,35 @@
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
FROM alpine as vault
FROM alpine:3.10 as vault

ENV VAULT_VERSION "v2.8.0b"
ENV VAULT_VERSION "v2.11.0"

ENV URL "https://github.com/dani-garcia/bw_web_builds/releases/download/$VAULT_VERSION/bw_web_$VAULT_VERSION.tar.gz"

RUN apk add --update-cache --upgrade \
RUN apk add --no-cache --upgrade \
    curl \
    tar

RUN mkdir /web-vault
WORKDIR /web-vault

SHELL ["/bin/ash", "-eo", "pipefail", "-c"]

RUN curl -L $URL | tar xz
RUN ls

########################## BUILD IMAGE ##########################
# We need to use the Rust build image, because
# we need the Rust compiler and Cargo tooling
FROM rust as build
FROM rust:1.36 as build

# set mysql backend
ARG DB=mysql

RUN apt-get update \
    && apt-get install -y \
    --no-install-recommends \
    gcc-arm-linux-gnueabihf \
    && mkdir -p ~/.cargo \
    && echo '[target.armv7-unknown-linux-gnueabihf]' >> ~/.cargo/config \
@@ -41,8 +47,11 @@ RUN sed 's/^deb/deb-src/' /etc/apt/sources.list > \
    && dpkg --add-architecture armhf \
    && apt-get update \
    && apt-get install -y \
    --no-install-recommends \
    libssl-dev:armhf \
    libc6-dev:armhf
    libc6-dev:armhf \
    libmariadb-dev:armhf

ENV CC_armv7_unknown_linux_gnueabihf="/usr/bin/arm-linux-gnueabihf-gcc"
ENV CROSS_COMPILE="1"
@@ -55,7 +64,7 @@ COPY . .

# Build
RUN rustup target add armv7-unknown-linux-gnueabihf
RUN cargo build --release --target=armv7-unknown-linux-gnueabihf -v
RUN cargo build --features ${DB} --release --target=armv7-unknown-linux-gnueabihf -v

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
@@ -69,11 +78,12 @@ ENV ROCKET_WORKERS=10
RUN [ "cross-build-start" ]

# Install needed libraries
RUN apt-get update && apt-get install -y\
    openssl\
    ca-certificates\
    --no-install-recommends\
    && rm -rf /var/lib/apt/lists/*
RUN apt-get update && apt-get install -y \
    --no-install-recommends \
    openssl \
    ca-certificates \
    libmariadbclient-dev \
    && rm -rf /var/lib/apt/lists/*

RUN mkdir /data

@@ -89,4 +99,4 @@ COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/armv7-unknown-linux-gnueabihf/release/bitwarden_rs .

# Configures the startup!
CMD ./bitwarden_rs
CMD ["./bitwarden_rs"]
101
docker/armv7/sqlite/Dockerfile
Normal file
101
docker/armv7/sqlite/Dockerfile
Normal file
@@ -0,0 +1,101 @@
# Using multistage build:
#   https://docs.docker.com/develop/develop-images/multistage-build/
#   https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
FROM alpine:3.10 as vault

ENV VAULT_VERSION "v2.11.0"

ENV URL "https://github.com/dani-garcia/bw_web_builds/releases/download/$VAULT_VERSION/bw_web_$VAULT_VERSION.tar.gz"

RUN apk add --no-cache --upgrade \
    curl \
    tar

RUN mkdir /web-vault
WORKDIR /web-vault

SHELL ["/bin/ash", "-eo", "pipefail", "-c"]

RUN curl -L $URL | tar xz
RUN ls

########################## BUILD IMAGE ##########################
# We need to use the Rust build image, because
# we need the Rust compiler and Cargo tooling
FROM rust:1.36 as build

# set sqlite as default for DB ARG for backward compatibility
ARG DB=sqlite

RUN apt-get update \
    && apt-get install -y \
        --no-install-recommends \
        gcc-arm-linux-gnueabihf \
    && mkdir -p ~/.cargo \
    && echo '[target.armv7-unknown-linux-gnueabihf]' >> ~/.cargo/config \
    && echo 'linker = "arm-linux-gnueabihf-gcc"' >> ~/.cargo/config

ENV CARGO_HOME "/root/.cargo"
ENV USER "root"

WORKDIR /app

# Prepare openssl armhf libs
RUN sed 's/^deb/deb-src/' /etc/apt/sources.list > \
        /etc/apt/sources.list.d/deb-src.list \
    && dpkg --add-architecture armhf \
    && apt-get update \
    && apt-get install -y \
        --no-install-recommends \
        libssl-dev:armhf \
        libc6-dev:armhf \
        libmariadb-dev:armhf

ENV CC_armv7_unknown_linux_gnueabihf="/usr/bin/arm-linux-gnueabihf-gcc"
ENV CROSS_COMPILE="1"
ENV OPENSSL_INCLUDE_DIR="/usr/include/arm-linux-gnueabihf"
ENV OPENSSL_LIB_DIR="/usr/lib/arm-linux-gnueabihf"

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .

# Build
RUN rustup target add armv7-unknown-linux-gnueabihf
RUN cargo build --features ${DB} --release --target=armv7-unknown-linux-gnueabihf -v

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM balenalib/armv7hf-debian:stretch

ENV ROCKET_ENV "staging"
ENV ROCKET_PORT=80
ENV ROCKET_WORKERS=10

RUN [ "cross-build-start" ]

# Install needed libraries
RUN apt-get update && apt-get install -y \
    --no-install-recommends \
    openssl \
    ca-certificates \
    libmariadbclient-dev \
    && rm -rf /var/lib/apt/lists/*

RUN mkdir /data

RUN [ "cross-build-end" ]

VOLUME /data
EXPOSE 80

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
COPY Rocket.toml .
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/armv7-unknown-linux-gnueabihf/release/bitwarden_rs .

# Configures the startup!
CMD ["./bitwarden_rs"]
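The OPENSSL_INCLUDE_DIR/OPENSSL_LIB_DIR variables set in the build stage above are consumed by the openssl-sys build script, so the armhf OpenSSL is linked instead of the host copy. A rough build-script-style sketch of how such environment variables are typically consumed (illustrative only, not this crate's actual build.rs):

    // build.rs-style sketch: point the linker at the cross-compiled OpenSSL
    use std::env;

    fn main() {
        // Build scripts like openssl-sys consult these variables
        // before probing the host system.
        if let Ok(lib_dir) = env::var("OPENSSL_LIB_DIR") {
            println!("cargo:rustc-link-search=native={}", lib_dir);
        }
        println!("cargo:rustc-link-lib=ssl");
        println!("cargo:rustc-link-lib=crypto");
    }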
62  migrations/mysql/2018-01-14-171611_create_tables/up.sql  Normal file
@@ -0,0 +1,62 @@
CREATE TABLE users (
  uuid                CHAR(36) NOT NULL PRIMARY KEY,
  created_at          DATETIME NOT NULL,
  updated_at          DATETIME NOT NULL,
  email               VARCHAR(255) NOT NULL UNIQUE,
  name                TEXT NOT NULL,
  password_hash       BLOB NOT NULL,
  salt                BLOB NOT NULL,
  password_iterations INTEGER NOT NULL,
  password_hint       TEXT,
  `key`               TEXT NOT NULL,
  private_key         TEXT,
  public_key          TEXT,
  totp_secret         TEXT,
  totp_recover        TEXT,
  security_stamp      TEXT NOT NULL,
  equivalent_domains  TEXT NOT NULL,
  excluded_globals    TEXT NOT NULL
);

CREATE TABLE devices (
  uuid          CHAR(36) NOT NULL PRIMARY KEY,
  created_at    DATETIME NOT NULL,
  updated_at    DATETIME NOT NULL,
  user_uuid     CHAR(36) NOT NULL REFERENCES users (uuid),
  name          TEXT NOT NULL,
  type          INTEGER NOT NULL,
  push_token    TEXT,
  refresh_token TEXT NOT NULL
);

CREATE TABLE ciphers (
  uuid              CHAR(36) NOT NULL PRIMARY KEY,
  created_at        DATETIME NOT NULL,
  updated_at        DATETIME NOT NULL,
  user_uuid         CHAR(36) NOT NULL REFERENCES users (uuid),
  folder_uuid       CHAR(36) REFERENCES folders (uuid),
  organization_uuid CHAR(36),
  type              INTEGER NOT NULL,
  name              TEXT NOT NULL,
  notes             TEXT,
  fields            TEXT,
  data              TEXT NOT NULL,
  favorite          BOOLEAN NOT NULL
);

CREATE TABLE attachments (
  id          CHAR(36) NOT NULL PRIMARY KEY,
  cipher_uuid CHAR(36) NOT NULL REFERENCES ciphers (uuid),
  file_name   TEXT NOT NULL,
  file_size   INTEGER NOT NULL
);

CREATE TABLE folders (
  uuid       CHAR(36) NOT NULL PRIMARY KEY,
  created_at DATETIME NOT NULL,
  updated_at DATETIME NOT NULL,
  user_uuid  CHAR(36) NOT NULL REFERENCES users (uuid),
  name       TEXT NOT NULL
);
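These MySQL migrations back a Diesel-managed schema: each table is mirrored in the Rust code by a table! declaration and a model struct. A minimal sketch of what that pairing looks like for the devices table, assuming a standard Diesel 1.x setup with the chrono feature (types and derives here are illustrative, not the project's actual schema.rs):

    table! {
        devices (uuid) {
            uuid -> Text,
            created_at -> Timestamp,
            updated_at -> Timestamp,
            user_uuid -> Text,
            name -> Text,
            atype -> Integer,            // the `type` column; see the rename migrations below
            push_token -> Nullable<Text>,
            refresh_token -> Text,
        }
    }

    #[derive(Queryable, Insertable)]
    #[table_name = "devices"]
    pub struct Device {
        pub uuid: String,
        pub created_at: chrono::NaiveDateTime,
        pub updated_at: chrono::NaiveDateTime,
        pub user_uuid: String,
        pub name: String,
        pub atype: i32,
        pub push_token: Option<String>,
        pub refresh_token: String,
    }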
@@ -0,0 +1,30 @@
CREATE TABLE collections (
  uuid     VARCHAR(40) NOT NULL PRIMARY KEY,
  org_uuid VARCHAR(40) NOT NULL REFERENCES organizations (uuid),
  name     TEXT NOT NULL
);

CREATE TABLE organizations (
  uuid          VARCHAR(40) NOT NULL PRIMARY KEY,
  name          TEXT NOT NULL,
  billing_email TEXT NOT NULL
);

CREATE TABLE users_collections (
  user_uuid       CHAR(36) NOT NULL REFERENCES users (uuid),
  collection_uuid CHAR(36) NOT NULL REFERENCES collections (uuid),
  PRIMARY KEY (user_uuid, collection_uuid)
);

CREATE TABLE users_organizations (
  uuid       CHAR(36) NOT NULL PRIMARY KEY,
  user_uuid  CHAR(36) NOT NULL REFERENCES users (uuid),
  org_uuid   CHAR(36) NOT NULL REFERENCES organizations (uuid),

  access_all BOOLEAN NOT NULL,
  `key`      TEXT NOT NULL,
  status     INTEGER NOT NULL,
  type       INTEGER NOT NULL,

  UNIQUE (user_uuid, org_uuid)
);
@@ -0,0 +1,34 @@
ALTER TABLE ciphers RENAME TO oldCiphers;

CREATE TABLE ciphers (
  uuid              CHAR(36) NOT NULL PRIMARY KEY,
  created_at        DATETIME NOT NULL,
  updated_at        DATETIME NOT NULL,
  user_uuid         CHAR(36) REFERENCES users (uuid),         -- Make this optional
  organization_uuid CHAR(36) REFERENCES organizations (uuid), -- Add reference to orgs table
  -- Remove folder_uuid
  type              INTEGER NOT NULL,
  name              TEXT NOT NULL,
  notes             TEXT,
  fields            TEXT,
  data              TEXT NOT NULL,
  favorite          BOOLEAN NOT NULL
);

CREATE TABLE folders_ciphers (
  cipher_uuid CHAR(36) NOT NULL REFERENCES ciphers (uuid),
  folder_uuid CHAR(36) NOT NULL REFERENCES folders (uuid),

  PRIMARY KEY (cipher_uuid, folder_uuid)
);

INSERT INTO ciphers (uuid, created_at, updated_at, user_uuid, organization_uuid, type, name, notes, fields, data, favorite)
SELECT uuid, created_at, updated_at, user_uuid, organization_uuid, type, name, notes, fields, data, favorite FROM oldCiphers;

INSERT INTO folders_ciphers (cipher_uuid, folder_uuid)
SELECT uuid, folder_uuid FROM oldCiphers WHERE folder_uuid IS NOT NULL;

DROP TABLE oldCiphers;

ALTER TABLE users_collections ADD COLUMN read_only BOOLEAN NOT NULL DEFAULT 0; -- False
@@ -0,0 +1,5 @@
CREATE TABLE ciphers_collections (
  cipher_uuid     CHAR(36) NOT NULL REFERENCES ciphers (uuid),
  collection_uuid CHAR(36) NOT NULL REFERENCES collections (uuid),
  PRIMARY KEY (cipher_uuid, collection_uuid)
);
@@ -0,0 +1,14 @@
ALTER TABLE attachments RENAME TO oldAttachments;

CREATE TABLE attachments (
  id          CHAR(36) NOT NULL PRIMARY KEY,
  cipher_uuid CHAR(36) NOT NULL REFERENCES ciphers (uuid),
  file_name   TEXT NOT NULL,
  file_size   INTEGER NOT NULL
);

INSERT INTO attachments (id, cipher_uuid, file_name, file_size)
SELECT id, cipher_uuid, file_name, file_size FROM oldAttachments;

DROP TABLE oldAttachments;
@@ -0,0 +1,15 @@
CREATE TABLE twofactor (
  uuid      CHAR(36) NOT NULL PRIMARY KEY,
  user_uuid CHAR(36) NOT NULL REFERENCES users (uuid),
  type      INTEGER NOT NULL,
  enabled   BOOLEAN NOT NULL,
  data      TEXT NOT NULL,

  UNIQUE (user_uuid, type)
);

INSERT INTO twofactor (uuid, user_uuid, type, enabled, data)
SELECT UUID(), uuid, 0, 1, u.totp_secret FROM users u where u.totp_secret IS NOT NULL;

UPDATE users SET totp_secret = NULL; -- Instead of recreating the table, just leave the columns empty
3  migrations/mysql/2018-09-10-111213_add_invites/up.sql  Normal file
@@ -0,0 +1,3 @@
CREATE TABLE invitations (
  email VARCHAR(255) NOT NULL PRIMARY KEY
);
@@ -0,0 +1,3 @@
ALTER TABLE attachments
ADD COLUMN
  `key` TEXT;
@@ -0,0 +1,7 @@
ALTER TABLE attachments CHANGE COLUMN akey `key` TEXT;
ALTER TABLE ciphers CHANGE COLUMN atype type INTEGER NOT NULL;
ALTER TABLE devices CHANGE COLUMN atype type INTEGER NOT NULL;
ALTER TABLE twofactor CHANGE COLUMN atype type INTEGER NOT NULL;
ALTER TABLE users CHANGE COLUMN akey `key` TEXT;
ALTER TABLE users_organizations CHANGE COLUMN akey `key` TEXT;
ALTER TABLE users_organizations CHANGE COLUMN atype type INTEGER NOT NULL;
@@ -0,0 +1,7 @@
ALTER TABLE attachments CHANGE COLUMN `key` akey TEXT;
ALTER TABLE ciphers CHANGE COLUMN type atype INTEGER NOT NULL;
ALTER TABLE devices CHANGE COLUMN type atype INTEGER NOT NULL;
ALTER TABLE twofactor CHANGE COLUMN type atype INTEGER NOT NULL;
ALTER TABLE users CHANGE COLUMN `key` akey TEXT;
ALTER TABLE users_organizations CHANGE COLUMN `key` akey TEXT;
ALTER TABLE users_organizations CHANGE COLUMN type atype INTEGER NOT NULL;
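These renames (key to akey, type to atype) sidestep two problems at once: `key` and `type` must be backtick-quoted as MySQL column names, and `type` is a Rust keyword, which previously forced workarounds like a `type_` field in the models. After the rename, struct fields and column names can agree directly; roughly (a sketch, assuming the models mirror the columns one-to-one):

    // Before: the column `type` could not be a plain Rust field name
    //   pub type_: i32,   // needed an explicit column-name mapping

    // After: column and field names agree, no quoting or aliasing needed
    pub struct UserOrganization {
        pub uuid: String,
        pub user_uuid: String,
        pub org_uuid: String,
        pub access_all: bool,
        pub akey: String, // was `key`
        pub status: i32,
        pub atype: i32,   // was `type`
    }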
@@ -0,0 +1,9 @@
DROP TABLE users;

DROP TABLE devices;

DROP TABLE ciphers;

DROP TABLE attachments;

DROP TABLE folders;
@@ -0,0 +1,8 @@
DROP TABLE collections;

DROP TABLE organizations;

DROP TABLE users_collections;

DROP TABLE users_organizations;
@@ -0,0 +1 @@
DROP TABLE ciphers_collections;
@@ -0,0 +1 @@
-- This file should undo anything in `up.sql`
@@ -0,0 +1,3 @@
ALTER TABLE devices
ADD COLUMN
  twofactor_remember TEXT;
@@ -0,0 +1,8 @@
UPDATE users
SET totp_secret = (
    SELECT twofactor.data FROM twofactor
    WHERE twofactor.type = 0
    AND twofactor.user_uuid = users.uuid
);

DROP TABLE twofactor;
@@ -0,0 +1,3 @@
ALTER TABLE ciphers
ADD COLUMN
  password_history TEXT;
1  migrations/sqlite/2018-09-10-111213_add_invites/down.sql  Normal file
@@ -0,0 +1 @@
DROP TABLE invitations;
@@ -0,0 +1,7 @@
ALTER TABLE users
ADD COLUMN
  client_kdf_type INTEGER NOT NULL DEFAULT 0; -- PBKDF2

ALTER TABLE users
ADD COLUMN
  client_kdf_iter INTEGER NOT NULL DEFAULT 5000;
@@ -0,0 +1,7 @@
ALTER TABLE attachments RENAME COLUMN akey TO key;
ALTER TABLE ciphers RENAME COLUMN atype TO type;
ALTER TABLE devices RENAME COLUMN atype TO type;
ALTER TABLE twofactor RENAME COLUMN atype TO type;
ALTER TABLE users RENAME COLUMN akey TO key;
ALTER TABLE users_organizations RENAME COLUMN akey TO key;
ALTER TABLE users_organizations RENAME COLUMN atype TO type;
@@ -0,0 +1,7 @@
ALTER TABLE attachments RENAME COLUMN key TO akey;
ALTER TABLE ciphers RENAME COLUMN type TO atype;
ALTER TABLE devices RENAME COLUMN type TO atype;
ALTER TABLE twofactor RENAME COLUMN type TO atype;
ALTER TABLE users RENAME COLUMN key TO akey;
ALTER TABLE users_organizations RENAME COLUMN key TO akey;
ALTER TABLE users_organizations RENAME COLUMN type TO atype;
@@ -1 +1 @@
nightly-2019-01-08
nightly-2019-08-18
293  src/api/admin.rs
@@ -1,24 +1,169 @@
use rocket_contrib::json::Json;
use serde_json::Value;
use std::process::Command;

use crate::api::{JsonResult, JsonUpcase};
use rocket::http::{Cookie, Cookies, SameSite};
use rocket::request::{self, FlashMessage, Form, FromRequest, Request};
use rocket::response::{content::Html, Flash, Redirect};
use rocket::{Outcome, Route};
use rocket_contrib::json::Json;

use crate::api::{ApiResult, EmptyResult, JsonResult};
use crate::auth::{decode_admin, encode_jwt, generate_admin_claims, ClientIp};
use crate::config::ConfigBuilder;
use crate::db::{backup_database, models::*, DbConn};
use crate::error::Error;
use crate::mail;
use crate::CONFIG;

use crate::db::models::*;
use crate::db::DbConn;
use crate::mail;

use rocket::request::{self, FromRequest, Request};
use rocket::{Outcome, Route};

pub fn routes() -> Vec<Route> {
    routes![get_users, invite_user, delete_user]
    if CONFIG.admin_token().is_none() && !CONFIG.disable_admin_token() {
        return routes![admin_disabled];
    }

    routes![
        admin_login,
        get_users,
        post_admin_login,
        admin_page,
        invite_user,
        delete_user,
        deauth_user,
        remove_2fa,
        update_revision_users,
        post_config,
        delete_config,
        backup_db,
    ]
}

lazy_static! {
    static ref CAN_BACKUP: bool = cfg!(feature = "sqlite") && Command::new("sqlite").arg("-version").status().is_ok();
}

#[get("/")]
fn admin_disabled() -> &'static str {
    "The admin panel is disabled, please configure the 'ADMIN_TOKEN' variable to enable it"
}

const COOKIE_NAME: &str = "BWRS_ADMIN";
const ADMIN_PATH: &str = "/admin";

const BASE_TEMPLATE: &str = "admin/base";
const VERSION: Option<&str> = option_env!("GIT_VERSION");

#[get("/", rank = 2)]
fn admin_login(flash: Option<FlashMessage>) -> ApiResult<Html<String>> {
    // If there is an error, show it
    let msg = flash.map(|msg| format!("{}: {}", msg.name(), msg.msg()));
    let json = json!({"page_content": "admin/login", "version": VERSION, "error": msg});

    // Return the page
    let text = CONFIG.render_template(BASE_TEMPLATE, &json)?;
    Ok(Html(text))
}

#[derive(FromForm)]
struct LoginForm {
    token: String,
}

#[post("/", data = "<data>")]
fn post_admin_login(data: Form<LoginForm>, mut cookies: Cookies, ip: ClientIp) -> Result<Redirect, Flash<Redirect>> {
    let data = data.into_inner();

    // If the token is invalid, redirect to login page
    if !_validate_token(&data.token) {
        error!("Invalid admin token. IP: {}", ip.ip);
        Err(Flash::error(
            Redirect::to(ADMIN_PATH),
            "Invalid admin token, please try again.",
        ))
    } else {
        // If the token received is valid, generate JWT and save it as a cookie
        let claims = generate_admin_claims();
        let jwt = encode_jwt(&claims);

        let cookie = Cookie::build(COOKIE_NAME, jwt)
            .path(ADMIN_PATH)
            .max_age(chrono::Duration::minutes(20))
            .same_site(SameSite::Strict)
            .http_only(true)
            .finish();

        cookies.add(cookie);
        Ok(Redirect::to(ADMIN_PATH))
    }
}
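The login handler above exchanges a valid admin token for a short-lived JWT stored in an HttpOnly, SameSite=Strict cookie scoped to /admin, so the token itself is never persisted client-side. A sketch of what the claims produced by generate_admin_claims plausibly look like (field and issuer values are assumptions, not taken from this diff):

    use chrono::Utc;

    #[derive(Serialize, Deserialize)]
    pub struct AdminJWTClaims {
        pub nbf: i64,    // not valid before
        pub exp: i64,    // expiry; matches the 20-minute cookie max_age
        pub iss: String, // issuer
        pub sub: String, // subject, e.g. "admin_panel"
    }

    pub fn generate_admin_claims() -> AdminJWTClaims {
        let now = Utc::now().naive_utc();
        AdminJWTClaims {
            nbf: now.timestamp(),
            exp: (now + chrono::Duration::minutes(20)).timestamp(),
            iss: "admin".to_string(),
            sub: "admin_panel".to_string(),
        }
    }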
fn _validate_token(token: &str) -> bool {
    match CONFIG.admin_token().as_ref() {
        None => false,
        Some(t) => crate::crypto::ct_eq(t.trim(), token.trim()),
    }
}
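Note that _validate_token compares via crate::crypto::ct_eq rather than ==, so the comparison takes the same time whether the first or the last byte differs, which blunts timing attacks against the admin token. A constant-time comparison can be sketched like this (illustrative; the project delegates to its crypto module rather than hand-rolling it):

    // Constant-time equality sketch: always scan the full length,
    // accumulating differences instead of returning at the first mismatch.
    fn ct_eq_sketch(a: &[u8], b: &[u8]) -> bool {
        if a.len() != b.len() {
            return false;
        }
        let mut diff = 0u8;
        for (x, y) in a.iter().zip(b.iter()) {
            diff |= x ^ y;
        }
        diff == 0
    }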
#[derive(Serialize)]
struct AdminTemplateData {
    page_content: String,
    version: Option<&'static str>,
    users: Vec<Value>,
    config: Value,
    can_backup: bool,
}

impl AdminTemplateData {
    fn new(users: Vec<Value>) -> Self {
        Self {
            page_content: String::from("admin/page"),
            version: VERSION,
            users,
            config: CONFIG.prepare_json(),
            can_backup: *CAN_BACKUP,
        }
    }

    fn render(self) -> Result<String, Error> {
        CONFIG.render_template(BASE_TEMPLATE, &self)
    }
}

#[get("/", rank = 1)]
fn admin_page(_token: AdminToken, conn: DbConn) -> ApiResult<Html<String>> {
    let users = User::get_all(&conn);
    let users_json: Vec<Value> = users.iter().map(|u| u.to_json(&conn)).collect();

    let text = AdminTemplateData::new(users_json).render()?;
    Ok(Html(text))
}

#[derive(Deserialize, Debug)]
#[allow(non_snake_case)]
struct InviteData {
    Email: String,
    email: String,
}

#[post("/invite", data = "<data>")]
fn invite_user(data: Json<InviteData>, _token: AdminToken, conn: DbConn) -> EmptyResult {
    let data: InviteData = data.into_inner();
    let email = data.email.clone();
    if User::find_by_mail(&data.email, &conn).is_some() {
        err!("User already exists")
    }

    if !CONFIG.invitations_allowed() {
        err!("Invitations are not allowed")
    }

    let mut user = User::new(email);
    user.save(&conn)?;

    if CONFIG.mail_enabled() {
        let org_name = "bitwarden_rs";
        mail::send_invite(&user.email, &user.uuid, None, None, &org_name, None)
    } else {
        let invitation = Invitation::new(data.email);
        invitation.save(&conn)
    }
}

#[get("/users")]
@@ -29,40 +174,64 @@ fn get_users(_token: AdminToken, conn: DbConn) -> JsonResult {
    Ok(Json(Value::Array(users_json)))
}

#[post("/invite", data = "<data>")]
fn invite_user(data: JsonUpcase<InviteData>, _token: AdminToken, conn: DbConn) -> JsonResult {
    let data: InviteData = data.into_inner().data;
    let email = data.Email.clone();
    if User::find_by_mail(&data.Email, &conn).is_some() {
        err!("User already exists")
    }

    if !CONFIG.invitations_allowed {
        err!("Invitations are not allowed")
    }

    if let Some(ref mail_config) = CONFIG.mail {
        let mut user = User::new(email);
        user.save(&conn)?;
        let org_name = "bitwarden_rs";
        mail::send_invite(&user.email, &user.uuid, None, None, &org_name, None, mail_config)?;
    } else {
        let mut invitation = Invitation::new(data.Email);
        invitation.save(&conn)?;
    }

    Ok(Json(json!({})))
}

#[post("/users/<uuid>/delete")]
fn delete_user(uuid: String, _token: AdminToken, conn: DbConn) -> JsonResult {
fn delete_user(uuid: String, _token: AdminToken, conn: DbConn) -> EmptyResult {
    let user = match User::find_by_uuid(&uuid, &conn) {
        Some(user) => user,
        None => err!("User doesn't exist"),
    };

    user.delete(&conn)?;
    Ok(Json(json!({})))
    user.delete(&conn)
}

#[post("/users/<uuid>/deauth")]
fn deauth_user(uuid: String, _token: AdminToken, conn: DbConn) -> EmptyResult {
    let mut user = match User::find_by_uuid(&uuid, &conn) {
        Some(user) => user,
        None => err!("User doesn't exist"),
    };

    Device::delete_all_by_user(&user.uuid, &conn)?;
    user.reset_security_stamp();

    user.save(&conn)
}

#[post("/users/<uuid>/remove-2fa")]
fn remove_2fa(uuid: String, _token: AdminToken, conn: DbConn) -> EmptyResult {
    let mut user = match User::find_by_uuid(&uuid, &conn) {
        Some(user) => user,
        None => err!("User doesn't exist"),
    };

    TwoFactor::delete_all_by_user(&user.uuid, &conn)?;
    user.totp_recover = None;
    user.save(&conn)
}

#[post("/users/update_revision")]
fn update_revision_users(_token: AdminToken, conn: DbConn) -> EmptyResult {
    User::update_all_revisions(&conn)
}

#[post("/config", data = "<data>")]
fn post_config(data: Json<ConfigBuilder>, _token: AdminToken) -> EmptyResult {
    let data: ConfigBuilder = data.into_inner();
    CONFIG.update_config(data)
}

#[post("/config/delete")]
fn delete_config(_token: AdminToken) -> EmptyResult {
    CONFIG.delete_user_config()
}

#[post("/config/backup_db")]
fn backup_db(_token: AdminToken) -> EmptyResult {
    if *CAN_BACKUP {
        backup_database()
    } else {
        err!("Can't back up current DB (either it's not SQLite or the 'sqlite' binary is not present)");
    }
}

pub struct AdminToken {}

@@ -71,37 +240,29 @@ impl<'a, 'r> FromRequest<'a, 'r> for AdminToken {
    type Error = &'static str;

    fn from_request(request: &'a Request<'r>) -> request::Outcome<Self, Self::Error> {
        let config_token = match CONFIG.admin_token.as_ref() {
            Some(token) => token,
            None => err_handler!("Admin panel is disabled"),
        };
        if CONFIG.disable_admin_token() {
            Outcome::Success(AdminToken {})
        } else {
            let mut cookies = request.cookies();

            // Get access_token
            let access_token: &str = match request.headers().get_one("Authorization") {
                Some(a) => match a.rsplit("Bearer ").next() {
                    Some(split) => split,
                    None => err_handler!("No access token provided"),
                },
                None => err_handler!("No access token provided"),
            };
            let access_token = match cookies.get(COOKIE_NAME) {
                Some(cookie) => cookie.value(),
                None => return Outcome::Forward(()), // If there is no cookie, redirect to login
            };

            // TODO: What authentication to use?
            // Option 1: Make it a config option
            // Option 2: Generate random token, and
            //  Option 2a: Send it to admin email, like upstream
            //  Option 2b: Print in console or save to data dir, so admin can check
            let ip = match request.guard::<ClientIp>() {
                Outcome::Success(ip) => ip.ip,
                _ => err_handler!("Error getting Client IP"),
            };

            use crate::auth::ClientIp;
            if decode_admin(access_token).is_err() {
                // Remove admin cookie
                cookies.remove(Cookie::named(COOKIE_NAME));
                error!("Invalid or expired admin JWT. IP: {}.", ip);
                return Outcome::Forward(());
            }

            let ip = match request.guard::<ClientIp>() {
                Outcome::Success(ip) => ip,
                _ => err_handler!("Error getting Client IP"),
            };

            if access_token != config_token {
                err_handler!("Invalid admin token", format!("IP: {}.", ip.ip))
                Outcome::Success(AdminToken {})
            }

            Outcome::Success(AdminToken {})
        }
    }
}
@@ -4,7 +4,7 @@ use crate::db::models::*;
use crate::db::DbConn;

use crate::api::{EmptyResult, JsonResult, JsonUpcase, Notify, NumberOrString, PasswordData, UpdateType};
use crate::auth::{decode_invite_jwt, Headers, InviteJWTClaims};
use crate::auth::{decode_invite, Headers};
use crate::mail;

use crate::CONFIG;
@@ -66,7 +66,7 @@ fn register(data: JsonUpcase<RegisterData>, conn: DbConn) -> EmptyResult {
    }

    if let Some(token) = data.Token {
        let claims: InviteJWTClaims = decode_invite_jwt(&token)?;
        let claims = decode_invite(&token)?;
        if claims.email == data.Email {
            user
        } else {
@@ -79,14 +79,14 @@ fn register(data: JsonUpcase<RegisterData>, conn: DbConn) -> EmptyResult {
        }

        user
    } else if CONFIG.signups_allowed {
    } else if CONFIG.signups_allowed() {
        err!("Account with this email already exists")
    } else {
        err!("Registration not allowed")
    }
}
None => {
    if CONFIG.signups_allowed || Invitation::take(&data.Email, &conn) {
    if CONFIG.signups_allowed() || Invitation::take(&data.Email, &conn) {
        User::new(data.Email.clone())
    } else {
        err!("Registration not allowed")
@@ -106,7 +106,7 @@ fn register(data: JsonUpcase<RegisterData>, conn: DbConn) -> EmptyResult {
    }

    user.set_password(&data.MasterPasswordHash);
    user.key = data.Key;
    user.akey = data.Key;

    // Add extra fields if present
    if let Some(name) = data.Name {
@@ -204,7 +204,7 @@ fn post_password(data: JsonUpcase<ChangePassData>, headers: Headers, conn: DbCon
    }

    user.set_password(&data.NewMasterPasswordHash);
    user.key = data.Key;
    user.akey = data.Key;
    user.save(&conn)
}

@@ -231,7 +231,7 @@ fn post_kdf(data: JsonUpcase<ChangeKdfData>, headers: Headers, conn: DbConn) ->
    user.client_kdf_iter = data.KdfIterations;
    user.client_kdf_type = data.Kdf;
    user.set_password(&data.NewMasterPasswordHash);
    user.key = data.Key;
    user.akey = data.Key;
    user.save(&conn)
}

@@ -306,7 +306,7 @@ fn post_rotatekey(data: JsonUpcase<KeyData>, headers: Headers, conn: DbConn, nt:
    // Update user data
    let mut user = headers.user;

    user.key = data.Key;
    user.akey = data.Key;
    user.private_key = Some(data.PrivateKey);
    user.reset_security_stamp();

@@ -322,6 +322,7 @@ fn post_sstamp(data: JsonUpcase<PasswordData>, headers: Headers, conn: DbConn) -
    err!("Invalid password")
}

Device::delete_all_by_user(&user.uuid, &conn)?;
user.reset_security_stamp();
user.save(&conn)
}
@@ -376,7 +377,7 @@ fn post_email(data: JsonUpcase<ChangeEmailData>, headers: Headers, conn: DbConn)
user.email = data.NewEmail;

user.set_password(&data.NewMasterPasswordHash);
user.key = data.Key;
user.akey = data.Key;

user.save(&conn)
}
@@ -419,9 +420,9 @@ fn password_hint(data: JsonUpcase<PasswordHintData>, conn: DbConn) -> EmptyResul
    None => return Ok(()),
};

if let Some(ref mail_config) = CONFIG.mail {
    mail::send_password_hint(&data.Email, hint, mail_config)?;
} else if CONFIG.show_password_hint {
if CONFIG.mail_enabled() {
    mail::send_password_hint(&data.Email, hint)?;
} else if CONFIG.show_password_hint() {
    if let Some(hint) = hint {
        err!(format!("Your password hint is: {}", &hint));
    } else {
@@ -74,10 +74,10 @@ fn sync(data: Form<SyncData>, headers: Headers, conn: DbConn) -> JsonResult {
    let user_json = headers.user.to_json(&conn);

    let folders = Folder::find_by_user(&headers.user.uuid, &conn);
    let folders_json: Vec<Value> = folders.iter().map(|c| c.to_json()).collect();
    let folders_json: Vec<Value> = folders.iter().map(Folder::to_json).collect();

    let collections = Collection::find_by_user_uuid(&headers.user.uuid, &conn);
    let collections_json: Vec<Value> = collections.iter().map(|c| c.to_json()).collect();
    let collections_json: Vec<Value> = collections.iter().map(Collection::to_json).collect();

    let ciphers = Cipher::find_by_user(&headers.user.uuid, &conn);
    let ciphers_json: Vec<Value> = ciphers
@@ -221,6 +221,10 @@ pub fn update_cipher_from_data(
    nt: &Notify,
    ut: UpdateType,
) -> EmptyResult {
    if cipher.organization_uuid.is_some() && cipher.organization_uuid != data.OrganizationId {
        err!("Organization mismatch. Please resync the client before updating the cipher")
    }

    if let Some(org_id) = data.OrganizationId {
        match UserOrganization::find_by_user_and_org(&headers.user.uuid, &org_id, &conn) {
            None => err!("You don't have permission to add item to organization"),
@@ -263,7 +267,7 @@ pub fn update_cipher_from_data(
            err!("Attachment is not owned by the cipher")
        }

        saved_att.key = Some(attachment.Key);
        saved_att.akey = Some(attachment.Key);
        saved_att.file_name = attachment.FileName;

        saved_att.save(&conn)?;
@@ -300,10 +304,13 @@ pub fn update_cipher_from_data(
    cipher.password_history = data.PasswordHistory.map(|f| f.to_string());

    cipher.save(&conn)?;
    cipher.move_to_folder(data.FolderId, &headers.user.uuid, &conn)?;

    nt.send_cipher_update(ut, &cipher, &cipher.update_users_revision(&conn));
    if ut != UpdateType::None {
        nt.send_cipher_update(ut, &cipher, &cipher.update_users_revision(&conn));
    }

    cipher.move_to_folder(data.FolderId, &headers.user.uuid, &conn)
    Ok(())
}

use super::folders::FolderData;
@@ -346,25 +353,18 @@ fn post_ciphers_import(data: JsonUpcase<ImportData>, headers: Headers, conn: DbC
    }

    // Read and create the ciphers
    for (index, cipher_data) in data.Ciphers.into_iter().enumerate() {
    for (index, mut cipher_data) in data.Ciphers.into_iter().enumerate() {
        let folder_uuid = relations_map.get(&index).map(|i| folders[*i].uuid.clone());
        cipher_data.FolderId = folder_uuid;

        let mut cipher = Cipher::new(cipher_data.Type, cipher_data.Name.clone());
        update_cipher_from_data(
            &mut cipher,
            cipher_data,
            &headers,
            false,
            &conn,
            &nt,
            UpdateType::CipherCreate,
        )?;

        cipher.move_to_folder(folder_uuid, &headers.user.uuid.clone(), &conn)?;
        update_cipher_from_data(&mut cipher, cipher_data, &headers, false, &conn, &nt, UpdateType::None)?;
    }

    let mut user = headers.user;
    user.update_revision(&conn)
    user.update_revision(&conn)?;
    nt.send_user_update(UpdateType::Vault, &user);
    Ok(())
}

#[put("/ciphers/<uuid>/admin", data = "<data>")]
@@ -608,7 +608,7 @@ fn share_cipher_by_uuid(
    None => err!("Invalid collection ID provided"),
    Some(collection) => {
        if collection.is_writable_by_user(&headers.user.uuid, &conn) {
            CollectionCipher::save(&cipher.uuid.clone(), &collection.uuid, &conn)?;
            CollectionCipher::save(&cipher.uuid, &collection.uuid, &conn)?;
            shared_to_collection = true;
        } else {
            err!("No rights to modify the collection")
@@ -632,7 +632,14 @@ fn share_cipher_by_uuid(
}

#[post("/ciphers/<uuid>/attachment", format = "multipart/form-data", data = "<data>")]
fn post_attachment(uuid: String, data: Data, content_type: &ContentType, headers: Headers, conn: DbConn) -> JsonResult {
fn post_attachment(
    uuid: String,
    data: Data,
    content_type: &ContentType,
    headers: Headers,
    conn: DbConn,
    nt: Notify,
) -> JsonResult {
    let cipher = match Cipher::find_by_uuid(&uuid, &conn) {
        Some(cipher) => cipher,
        None => err!("Cipher doesn't exist"),
@@ -646,13 +653,13 @@ fn post_attachment(uuid: String, data: Data, content_type: &ContentType, headers
    let boundary_pair = params.next().expect("No boundary provided");
    let boundary = boundary_pair.1;

    let base_path = Path::new(&CONFIG.attachments_folder).join(&cipher.uuid);
    let base_path = Path::new(&CONFIG.attachments_folder()).join(&cipher.uuid);

    let mut attachment_key = None;

    Multipart::with_body(data.open(), boundary)
        .foreach_entry(|mut field| {
            match field.headers.name.as_str() {
            match &*field.headers.name {
                "key" => {
                    use std::io::Read;
                    let mut key_buffer = String::new();
@@ -684,7 +691,7 @@ fn post_attachment(uuid: String, data: Data, content_type: &ContentType, headers
                };

                let mut attachment = Attachment::new(file_name, cipher.uuid.clone(), name, size);
                attachment.key = attachment_key.clone();
                attachment.akey = attachment_key.clone();
                attachment.save(&conn).expect("Error saving attachment");
            }
            _ => error!("Invalid multipart name"),
@@ -692,6 +699,8 @@ fn post_attachment(uuid: String, data: Data, content_type: &ContentType, headers
        })
        .expect("Error processing multipart data");

    nt.send_cipher_update(UpdateType::CipherUpdate, &cipher, &cipher.update_users_revision(&conn));

    Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, &conn)))
}

@@ -702,8 +711,9 @@ fn post_attachment_admin(
    content_type: &ContentType,
    headers: Headers,
    conn: DbConn,
    nt: Notify,
) -> JsonResult {
    post_attachment(uuid, data, content_type, headers, conn)
    post_attachment(uuid, data, content_type, headers, conn, nt)
}

#[post(
@@ -721,7 +731,7 @@ fn post_attachment_share(
    nt: Notify,
) -> JsonResult {
    _delete_cipher_attachment_by_id(&uuid, &attachment_id, &headers, &conn, &nt)?;
    post_attachment(uuid, data, content_type, headers, conn)
    post_attachment(uuid, data, content_type, headers, conn, nt)
}

#[post("/ciphers/<uuid>/attachment/<attachment_id>/delete-admin")]
@@ -808,83 +818,115 @@ fn delete_cipher_selected_post(data: JsonUpcase<Value>, headers: Headers, conn:
    delete_cipher_selected(data, headers, conn, nt)
}

#[derive(Deserialize)]
#[allow(non_snake_case)]
struct MoveCipherData {
    FolderId: Option<String>,
    Ids: Vec<String>,
}

#[post("/ciphers/move", data = "<data>")]
fn move_cipher_selected(data: JsonUpcase<Value>, headers: Headers, conn: DbConn, nt: Notify) -> EmptyResult {
fn move_cipher_selected(data: JsonUpcase<MoveCipherData>, headers: Headers, conn: DbConn, nt: Notify) -> EmptyResult {
    let data = data.into_inner().data;
    let user_uuid = headers.user.uuid;

    let folder_id = match data.get("FolderId") {
        Some(folder_id) => match folder_id.as_str() {
            Some(folder_id) => match Folder::find_by_uuid(folder_id, &conn) {
                Some(folder) => {
                    if folder.user_uuid != headers.user.uuid {
                        err!("Folder is not owned by user")
                    }
                    Some(folder.uuid)
    if let Some(ref folder_id) = data.FolderId {
        match Folder::find_by_uuid(folder_id, &conn) {
            Some(folder) => {
                if folder.user_uuid != user_uuid {
                    err!("Folder is not owned by user")
                }
                None => err!("Folder doesn't exist"),
            },
            None => err!("Folder id provided in wrong format"),
        },
        None => None,
    };
            }
            None => err!("Folder doesn't exist"),
        }
    }

    let uuids = match data.get("Ids") {
        Some(ids) => match ids.as_array() {
            Some(ids) => ids.iter().filter_map(Value::as_str),
            None => err!("Posted ids field is not an array"),
        },
        None => err!("Request missing ids field"),
    };

    for uuid in uuids {
        let mut cipher = match Cipher::find_by_uuid(uuid, &conn) {
    for uuid in data.Ids {
        let cipher = match Cipher::find_by_uuid(&uuid, &conn) {
            Some(cipher) => cipher,
            None => err!("Cipher doesn't exist"),
        };

        if !cipher.is_accessible_to_user(&headers.user.uuid, &conn) {
        if !cipher.is_accessible_to_user(&user_uuid, &conn) {
            err!("Cipher is not accessible by user")
        }

        // Move cipher
        cipher.move_to_folder(folder_id.clone(), &headers.user.uuid, &conn)?;
        cipher.save(&conn)?;
        cipher.move_to_folder(data.FolderId.clone(), &user_uuid, &conn)?;

        nt.send_cipher_update(UpdateType::CipherUpdate, &cipher, &cipher.update_users_revision(&conn));
        nt.send_cipher_update(UpdateType::CipherUpdate, &cipher, &[user_uuid.clone()]);
    }

    Ok(())
}

#[put("/ciphers/move", data = "<data>")]
fn move_cipher_selected_put(data: JsonUpcase<Value>, headers: Headers, conn: DbConn, nt: Notify) -> EmptyResult {
fn move_cipher_selected_put(
    data: JsonUpcase<MoveCipherData>,
    headers: Headers,
    conn: DbConn,
    nt: Notify,
) -> EmptyResult {
    move_cipher_selected(data, headers, conn, nt)
}

#[post("/ciphers/purge", data = "<data>")]
fn delete_all(data: JsonUpcase<PasswordData>, headers: Headers, conn: DbConn, nt: Notify) -> EmptyResult {
#[derive(FromForm)]
struct OrganizationId {
    #[form(field = "organizationId")]
    org_id: String,
}

#[post("/ciphers/purge?<organization..>", data = "<data>")]
fn delete_all(
    organization: Option<Form<OrganizationId>>,
    data: JsonUpcase<PasswordData>,
    headers: Headers,
    conn: DbConn,
    nt: Notify,
) -> EmptyResult {
    let data: PasswordData = data.into_inner().data;
    let password_hash = data.MasterPasswordHash;

    let user = headers.user;
    let mut user = headers.user;

    if !user.check_valid_password(&password_hash) {
        err!("Invalid password")
    }

    // Delete ciphers and their attachments
    for cipher in Cipher::find_owned_by_user(&user.uuid, &conn) {
        cipher.delete(&conn)?;
        nt.send_cipher_update(UpdateType::CipherDelete, &cipher, &cipher.update_users_revision(&conn));
    }
    match organization {
        Some(org_data) => {
            // Organization ID in query params, purging organization vault
            match UserOrganization::find_by_user_and_org(&user.uuid, &org_data.org_id, &conn) {
                None => err!("You don't have permission to purge the organization vault"),
                Some(user_org) => {
                    if user_org.atype == UserOrgType::Owner {
                        Cipher::delete_all_by_organization(&org_data.org_id, &conn)?;
                        Collection::delete_all_by_organization(&org_data.org_id, &conn)?;
                        nt.send_user_update(UpdateType::Vault, &user);
                        Ok(())
                    } else {
                        err!("You don't have permission to purge the organization vault");
                    }
                }
            }
        }
        None => {
            // No organization ID in query params, purging user vault
            // Delete ciphers and their attachments
            for cipher in Cipher::find_owned_by_user(&user.uuid, &conn) {
                cipher.delete(&conn)?;
            }

    // Delete folders
    for f in Folder::find_by_user(&user.uuid, &conn) {
        f.delete(&conn)?;
        nt.send_folder_update(UpdateType::FolderCreate, &f);
    }
            // Delete folders
            for f in Folder::find_by_user(&user.uuid, &conn) {
                f.delete(&conn)?;
            }

    Ok(())
            user.update_revision(&conn)?;
            nt.send_user_update(UpdateType::Vault, &user);
            Ok(())
        }
    }
}

fn _delete_cipher_by_uuid(uuid: &str, headers: &Headers, conn: &DbConn, nt: &Notify) -> EmptyResult {
@@ -929,6 +971,6 @@ fn _delete_cipher_attachment_by_id(

    // Delete attachment
    attachment.delete(&conn)?;
    nt.send_cipher_update(UpdateType::CipherDelete, &cipher, &cipher.update_users_revision(&conn));
    nt.send_cipher_update(UpdateType::CipherUpdate, &cipher, &cipher.update_users_revision(&conn));
    Ok(())
}
@@ -25,7 +25,7 @@ pub fn routes() -> Vec<Route> {
fn get_folders(headers: Headers, conn: DbConn) -> JsonResult {
    let folders = Folder::find_by_user(&headers.user.uuid, &conn);

    let folders_json: Vec<Value> = folders.iter().map(|c| c.to_json()).collect();
    let folders_json: Vec<Value> = folders.iter().map(Folder::to_json).collect();

    Ok(Json(json!({
        "Data": folders_json,
@@ -11,6 +11,7 @@ pub fn routes() -> Vec<Route> {
        get_eq_domains,
        post_eq_domains,
        put_eq_domains,
        hibp_breach,
    ];

    let mut routes = Vec::new();
@@ -32,10 +33,10 @@ use rocket::Route;
use rocket_contrib::json::Json;
use serde_json::Value;

use crate::db::DbConn;

use crate::api::{EmptyResult, JsonResult, JsonUpcase};
use crate::auth::Headers;
use crate::db::DbConn;
use crate::error::Error;

#[put("/devices/identifier/<uuid>/clear-token")]
fn clear_device_token(uuid: String) -> EmptyResult {
@@ -62,7 +63,7 @@ fn put_device_token(uuid: String, data: JsonUpcase<Value>, headers: Headers) ->
    Ok(Json(json!({
        "Id": headers.device.uuid,
        "Name": headers.device.name,
        "Type": headers.device.type_,
        "Type": headers.device.atype,
        "Identifier": headers.device.uuid,
        "CreationDate": crate::util::format_date(&headers.device.created_at),
    })))
@@ -116,8 +117,8 @@ fn post_eq_domains(data: JsonUpcase<EquivDomainData>, headers: Headers, conn: Db
    let mut user = headers.user;
    use serde_json::to_string;

    user.excluded_globals = to_string(&excluded_globals).unwrap_or("[]".to_string());
    user.equivalent_domains = to_string(&equivalent_domains).unwrap_or("[]".to_string());
    user.excluded_globals = to_string(&excluded_globals).unwrap_or_else(|_| "[]".to_string());
    user.equivalent_domains = to_string(&equivalent_domains).unwrap_or_else(|_| "[]".to_string());

    user.save(&conn)?;

@@ -128,3 +129,36 @@ fn post_eq_domains(data: JsonUpcase<EquivDomainData>, headers: Headers, conn: Db
fn put_eq_domains(data: JsonUpcase<EquivDomainData>, headers: Headers, conn: DbConn) -> JsonResult {
    post_eq_domains(data, headers, conn)
}

#[get("/hibp/breach?<username>")]
fn hibp_breach(username: String) -> JsonResult {
    let user_agent = "Bitwarden_RS";
    let url = format!(
        "https://haveibeenpwned.com/api/v3/breachedaccount/{}?truncateResponse=false&includeUnverified=false",
        username
    );

    use reqwest::{header::USER_AGENT, Client};

    if let Some(api_key) = crate::CONFIG.hibp_api_key() {
        let res = Client::new()
            .get(&url)
            .header(USER_AGENT, user_agent)
            .header("hibp-api-key", api_key)
            .send()?;

        // If we get a 404, return a 404, it means no breached accounts
        if res.status() == 404 {
            return Err(Error::empty().with_code(404));
        }

        let value: Value = res.error_for_status()?.json()?;
        Ok(Json(value))
    } else {
        Ok(Json(json!([{
            "title": "--- Error! ---",
            "description": "HaveIBeenPwned API key not set! Go to https://haveibeenpwned.com/API/Key",
            "logopath": "/bwrs_images/error-x.svg"
        }])))
    }
}
@@ -1,19 +1,16 @@
use rocket::request::Form;
use rocket::Route;
use rocket_contrib::json::Json;
use serde_json::Value;

use crate::api::{
    EmptyResult, JsonResult, JsonUpcase, JsonUpcaseVec, Notify, NumberOrString, PasswordData, UpdateType,
};
use crate::auth::{decode_invite, AdminHeaders, Headers, OwnerHeaders};
use crate::db::models::*;
use crate::db::DbConn;
use crate::CONFIG;

use crate::api::{EmptyResult, JsonResult, JsonUpcase, Notify, NumberOrString, PasswordData, UpdateType};
use crate::auth::{decode_invite_jwt, AdminHeaders, Headers, InviteJWTClaims, OwnerHeaders};

use crate::mail;

use serde::{Deserialize, Deserializer};

use rocket::Route;
use crate::CONFIG;

pub fn routes() -> Vec<Route> {
    routes![
@@ -26,6 +23,7 @@ pub fn routes() -> Vec<Route> {
        get_org_collections,
        get_org_collection_detail,
        get_collection_users,
        put_collection_users,
        put_organization,
        post_organization,
        post_organization_collections,
@@ -78,13 +76,13 @@ struct NewCollectionData {
fn create_organization(headers: Headers, data: JsonUpcase<OrgData>, conn: DbConn) -> JsonResult {
    let data: OrgData = data.into_inner().data;

    let mut org = Organization::new(data.Name, data.BillingEmail);
    let org = Organization::new(data.Name, data.BillingEmail);
    let mut user_org = UserOrganization::new(headers.user.uuid.clone(), org.uuid.clone());
    let mut collection = Collection::new(org.uuid.clone(), data.CollectionName);
    let collection = Collection::new(org.uuid.clone(), data.CollectionName);

    user_org.key = data.Key;
    user_org.akey = data.Key;
    user_org.access_all = true;
    user_org.type_ = UserOrgType::Owner as i32;
    user_org.atype = UserOrgType::Owner as i32;
    user_org.status = UserOrgStatus::Confirmed as i32;

    org.save(&conn)?;
@@ -129,7 +127,7 @@ fn leave_organization(org_id: String, headers: Headers, conn: DbConn) -> EmptyRe
    match UserOrganization::find_by_user_and_org(&headers.user.uuid, &org_id, &conn) {
        None => err!("User not part of organization"),
        Some(user_org) => {
            if user_org.type_ == UserOrgType::Owner {
            if user_org.atype == UserOrgType::Owner {
                let num_owners =
                    UserOrganization::find_by_org_and_type(&org_id, UserOrgType::Owner as i32, &conn).len();

@@ -223,7 +221,7 @@ fn post_organization_collections(
    None => err!("Can't find organization details"),
};

let mut collection = Collection::new(org.uuid.clone(), data.Name);
let collection = Collection::new(org.uuid.clone(), data.Name);
collection.save(&conn)?;

Ok(Json(collection.to_json()))
@@ -371,15 +369,44 @@ fn get_collection_users(org_id: String, coll_id: String, _headers: AdminHeaders,
    .map(|col_user| {
        UserOrganization::find_by_user_and_org(&col_user.user_uuid, &org_id, &conn)
            .unwrap()
            .to_json_collection_user_details(col_user.read_only, &conn)
            .to_json_collection_user_details(col_user.read_only)
    })
    .collect();

    Ok(Json(json!({
        "Data": user_list,
        "Object": "list",
        "ContinuationToken": null,
    })))
    Ok(Json(json!(user_list)))
}

#[put("/organizations/<org_id>/collections/<coll_id>/users", data = "<data>")]
fn put_collection_users(
    org_id: String,
    coll_id: String,
    data: JsonUpcaseVec<CollectionData>,
    _headers: AdminHeaders,
    conn: DbConn,
) -> EmptyResult {
    // Get org and collection, check that collection is from org
    if Collection::find_by_uuid_and_org(&coll_id, &org_id, &conn).is_none() {
        err!("Collection not found in Organization")
    }

    // Delete all the user-collections
    CollectionUser::delete_all_by_collection(&coll_id, &conn)?;

    // And then add all the received ones (except if the user has access_all)
    for d in data.iter().map(|d| &d.data) {
        let user = match UserOrganization::find_by_uuid(&d.Id, &conn) {
            Some(u) => u,
            None => err!("User is not part of organization"),
        };

        if user.access_all {
            continue;
        }

        CollectionUser::save(&user.user_uuid, &coll_id, d.ReadOnly, &conn)?;
    }

    Ok(())
}

#[derive(FromForm)]
@@ -415,14 +442,6 @@ fn get_org_users(org_id: String, _headers: AdminHeaders, conn: DbConn) -> JsonRe
    })))
}

fn deserialize_collections<'de, D>(deserializer: D) -> Result<Vec<CollectionData>, D::Error>
where
    D: Deserializer<'de>,
{
    // Deserialize null to empty Vec
    Deserialize::deserialize(deserializer).or(Ok(vec![]))
}

#[derive(Deserialize)]
#[allow(non_snake_case)]
struct CollectionData {
@@ -435,8 +454,7 @@ struct CollectionData {
struct InviteData {
    Emails: Vec<String>,
    Type: NumberOrString,
    #[serde(deserialize_with = "deserialize_collections")]
    Collections: Vec<CollectionData>,
    Collections: Option<Vec<CollectionData>>,
    AccessAll: Option<bool>,
}

@@ -454,18 +472,19 @@ fn send_invite(org_id: String, data: JsonUpcase<InviteData>, headers: AdminHeade
    }

    for email in data.Emails.iter() {
        let mut user_org_status = match CONFIG.mail {
            Some(_) => UserOrgStatus::Invited as i32,
            None => UserOrgStatus::Accepted as i32, // Automatically mark user as accepted if no email invites
        let mut user_org_status = if CONFIG.mail_enabled() {
            UserOrgStatus::Invited as i32
        } else {
            UserOrgStatus::Accepted as i32 // Automatically mark user as accepted if no email invites
        };
        let user = match User::find_by_mail(&email, &conn) {
            None => {
                if !CONFIG.invitations_allowed {
                if !CONFIG.invitations_allowed() {
                    err!(format!("User email does not exist: {}", email))
                }

                if CONFIG.mail.is_none() {
                    let mut invitation = Invitation::new(email.clone());
                if !CONFIG.mail_enabled() {
                    let invitation = Invitation::new(email.clone());
                    invitation.save(&conn)?;
                }

@@ -486,12 +505,12 @@ fn send_invite(org_id: String, data: JsonUpcase<InviteData>, headers: AdminHeade
        let mut new_user = UserOrganization::new(user.uuid.clone(), org_id.clone());
        let access_all = data.AccessAll.unwrap_or(false);
        new_user.access_all = access_all;
        new_user.type_ = new_type;
        new_user.atype = new_type;
        new_user.status = user_org_status;

        // If no accessAll, add the collections received
        if !access_all {
            for col in &data.Collections {
            for col in data.Collections.iter().flatten() {
                match Collection::find_by_uuid_and_org(&col.Id, &org_id, &conn) {
                    None => err!("Collection not found in Organization"),
                    Some(collection) => {
@@ -503,7 +522,7 @@ fn send_invite(org_id: String, data: JsonUpcase<InviteData>, headers: AdminHeade

        new_user.save(&conn)?;

        if let Some(ref mail_config) = CONFIG.mail {
        if CONFIG.mail_enabled() {
            let org_name = match Organization::find_by_uuid(&org_id, &conn) {
                Some(org) => org.name,
                None => err!("Error looking up organization"),
@@ -516,7 +535,6 @@ fn send_invite(org_id: String, data: JsonUpcase<InviteData>, headers: AdminHeade
                Some(new_user.uuid),
                &org_name,
                Some(headers.user.email.clone()),
                mail_config,
            )?;
        }
    }
@@ -526,11 +544,11 @@ fn send_invite(org_id: String, data: JsonUpcase<InviteData>, headers: AdminHeade

#[post("/organizations/<org_id>/users/<user_org>/reinvite")]
fn reinvite_user(org_id: String, user_org: String, headers: AdminHeaders, conn: DbConn) -> EmptyResult {
    if !CONFIG.invitations_allowed {
    if !CONFIG.invitations_allowed() {
        err!("Invitations are not allowed.")
    }

    if CONFIG.mail.is_none() {
    if !CONFIG.mail_enabled() {
        err!("SMTP is not configured.")
    }

@@ -553,7 +571,7 @@ fn reinvite_user(org_id: String, user_org: String, headers: AdminHeaders, conn:
    None => err!("Error looking up organization."),
};

if let Some(ref mail_config) = CONFIG.mail {
if CONFIG.mail_enabled() {
    mail::send_invite(
        &user.email,
        &user.uuid,
@@ -561,10 +579,9 @@ fn reinvite_user(org_id: String, user_org: String, headers: AdminHeaders, conn:
        Some(user_org.uuid),
        &org_name,
        Some(headers.user.email),
        mail_config,
    )?;
} else {
    let mut invitation = Invitation::new(user.email.clone());
    let invitation = Invitation::new(user.email.clone());
    invitation.save(&conn)?;
}

@@ -582,7 +599,7 @@ fn accept_invite(_org_id: String, _org_user_id: String, data: JsonUpcase<AcceptD
    // The web-vault passes org_id and org_user_id in the URL, but we are just reading them from the JWT instead
    let data: AcceptData = data.into_inner().data;
    let token = &data.Token;
    let claims: InviteJWTClaims = decode_invite_jwt(&token)?;
    let claims = decode_invite(&token)?;

    match User::find_by_mail(&claims.email, &conn) {
        Some(_) => {
@@ -605,7 +622,7 @@ fn accept_invite(_org_id: String, _org_user_id: String, data: JsonUpcase<AcceptD
    None => err!("Invited user not found"),
}

if let Some(ref mail_config) = CONFIG.mail {
if CONFIG.mail_enabled() {
    let mut org_name = String::from("bitwarden_rs");
    if let Some(org_id) = &claims.org_id {
        org_name = match Organization::find_by_uuid(&org_id, &conn) {
@@ -615,10 +632,10 @@ fn accept_invite(_org_id: String, _org_user_id: String, data: JsonUpcase<AcceptD
        };
        if let Some(invited_by_email) = &claims.invited_by_email {
            // User was invited to an organization, so they must be confirmed manually after acceptance
            mail::send_invite_accepted(&claims.email, invited_by_email, &org_name, mail_config)?;
            mail::send_invite_accepted(&claims.email, invited_by_email, &org_name)?;
        } else {
            // User was invited from /admin, so they are automatically confirmed
            mail::send_invite_confirmed(&claims.email, &org_name, mail_config)?;
            mail::send_invite_confirmed(&claims.email, &org_name)?;
        }
    }

@@ -640,7 +657,7 @@ fn confirm_invite(
    None => err!("The specified user isn't a member of the organization"),
};

if user_to_confirm.type_ != UserOrgType::User && headers.org_user_type != UserOrgType::Owner {
if user_to_confirm.atype != UserOrgType::User && headers.org_user_type != UserOrgType::Owner {
    err!("Only Owners can confirm Managers, Admins or Owners")
}

@@ -649,12 +666,12 @@ fn confirm_invite(
}

user_to_confirm.status = UserOrgStatus::Confirmed as i32;
user_to_confirm.key = match data["Key"].as_str() {
user_to_confirm.akey = match data["Key"].as_str() {
    Some(key) => key.to_string(),
    None => err!("Invalid key provided"),
};

if let Some(ref mail_config) = CONFIG.mail {
if CONFIG.mail_enabled() {
    let org_name = match Organization::find_by_uuid(&org_id, &conn) {
        Some(org) => org.name,
        None => err!("Error looking up organization."),
@@ -663,7 +680,7 @@ fn confirm_invite(
        Some(user) => user.email,
        None => err!("Error looking up user."),
    };
    mail::send_invite_confirmed(&address, &org_name, mail_config)?;
    mail::send_invite_confirmed(&address, &org_name)?;
}

user_to_confirm.save(&conn)
@@ -683,8 +700,7 @@ fn get_user(org_id: String, org_user_id: String, _headers: AdminHeaders, conn: D
#[allow(non_snake_case)]
struct EditUserData {
    Type: NumberOrString,
    #[serde(deserialize_with = "deserialize_collections")]
    Collections: Vec<CollectionData>,
    Collections: Option<Vec<CollectionData>>,
    AccessAll: bool,
}
||||
@@ -719,18 +735,18 @@ fn edit_user(
|
||||
None => err!("The specified user isn't member of the organization"),
|
||||
};
|
||||
|
||||
if new_type != user_to_edit.type_
|
||||
&& (user_to_edit.type_ >= UserOrgType::Admin || new_type >= UserOrgType::Admin)
|
||||
if new_type != user_to_edit.atype
|
||||
&& (user_to_edit.atype >= UserOrgType::Admin || new_type >= UserOrgType::Admin)
|
||||
&& headers.org_user_type != UserOrgType::Owner
|
||||
{
|
||||
err!("Only Owners can grant and remove Admin or Owner privileges")
|
||||
}
|
||||
|
||||
if user_to_edit.type_ == UserOrgType::Owner && headers.org_user_type != UserOrgType::Owner {
|
||||
if user_to_edit.atype == UserOrgType::Owner && headers.org_user_type != UserOrgType::Owner {
|
||||
err!("Only Owners can edit Owner users")
|
||||
}
|
||||
|
||||
if user_to_edit.type_ == UserOrgType::Owner && new_type != UserOrgType::Owner {
|
||||
if user_to_edit.atype == UserOrgType::Owner && new_type != UserOrgType::Owner {
|
||||
// Removing owner permmission, check that there are at least another owner
|
||||
let num_owners = UserOrganization::find_by_org_and_type(&org_id, UserOrgType::Owner as i32, &conn).len();
|
||||
|
||||
@@ -740,7 +756,7 @@ fn edit_user(
|
||||
}
|
||||
|
||||
user_to_edit.access_all = data.AccessAll;
|
||||
user_to_edit.type_ = new_type as i32;
|
||||
user_to_edit.atype = new_type as i32;
|
||||
|
||||
// Delete all the odd collections
|
||||
for c in CollectionUser::find_by_organization_and_user_uuid(&org_id, &user_to_edit.user_uuid, &conn) {
|
||||
@@ -749,7 +765,7 @@ fn edit_user(
|
||||
|
||||
// If no accessAll, add the collections received
|
||||
if !data.AccessAll {
|
||||
for col in &data.Collections {
|
||||
for col in data.Collections.iter().flatten() {
|
||||
match Collection::find_by_uuid_and_org(&col.Id, &org_id, &conn) {
|
||||
None => err!("Collection not found in Organization"),
|
||||
Some(collection) => {
|
||||
@@ -769,11 +785,11 @@ fn delete_user(org_id: String, org_user_id: String, headers: AdminHeaders, conn:
|
||||
None => err!("User to delete isn't member of the organization"),
|
||||
};
|
||||
|
||||
if user_to_delete.type_ != UserOrgType::User && headers.org_user_type != UserOrgType::Owner {
|
||||
if user_to_delete.atype != UserOrgType::User && headers.org_user_type != UserOrgType::Owner {
|
||||
err!("Only Owners can delete Admins or Owners")
|
||||
}
|
||||
|
||||
if user_to_delete.type_ == UserOrgType::Owner {
|
||||
if user_to_delete.atype == UserOrgType::Owner {
|
||||
// Removing owner, check that there are at least another owner
|
||||
let num_owners = UserOrganization::find_by_org_and_type(&org_id, UserOrgType::Owner as i32, &conn).len();
|
||||
|
||||
@@ -826,7 +842,7 @@ fn post_org_import(
|
||||
None => err!("User is not part of the organization"),
|
||||
};
|
||||
|
||||
if org_user.type_ < UserOrgType::Admin {
|
||||
if org_user.atype < UserOrgType::Admin {
|
||||
err!("Only admins or owners can import into an organization")
|
||||
}
|
||||
|
||||
@@ -835,7 +851,7 @@ fn post_org_import(
|
||||
.Collections
|
||||
.into_iter()
|
||||
.map(|coll| {
|
||||
let mut collection = Collection::new(org_id.clone(), coll.Name);
|
||||
let collection = Collection::new(org_id.clone(), coll.Name);
|
||||
if collection.save(&conn).is_err() {
|
||||
err!("Failed to create Collection");
|
||||
}
|
||||
|
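The Collections change above (Vec<CollectionData> to Option<Vec<CollectionData>>) leans on Option's iterator: `.iter().flatten()` yields no items for None and every element for Some, so the import loop needs no explicit match. A minimal standalone sketch:

// Sketch: iterating an Option<Vec<T>> the way the edit_user loop does.
fn main() {
    let none: Option<Vec<i32>> = None;
    let some: Option<Vec<i32>> = Some(vec![1, 2, 3]);

    // None contributes no iterations at all
    assert_eq!(none.iter().flatten().count(), 0);
    // Some(vec) iterates over every element
    assert_eq!(some.iter().flatten().copied().sum::<i32>(), 6);
    println!("ok");
}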
File diff suppressed because it is too large
297 src/api/icons.rs
@@ -1,14 +1,19 @@
use std::error::Error;
use std::fs::{create_dir_all, remove_file, symlink_metadata, File};
use std::io::prelude::*;
use std::time::SystemTime;
use std::time::{Duration, SystemTime};

use rocket::http::ContentType;
use rocket::response::Content;
use rocket::Route;

use reqwest;
use reqwest::{header::HeaderMap, Client, Response};

use rocket::http::Cookie;

use regex::Regex;
use soup::prelude::*;

use crate::error::Error;
use crate::CONFIG;

pub fn routes() -> Vec<Route> {
@@ -17,31 +22,73 @@ pub fn routes() -> Vec<Route> {

const FALLBACK_ICON: &[u8; 344] = include_bytes!("../static/fallback-icon.png");

const ALLOWED_CHARS: &str = "_-.";

lazy_static! {
// Reuse the client between requests
static ref CLIENT: Client = Client::builder()
.use_sys_proxy()
.gzip(true)
.timeout(Duration::from_secs(CONFIG.icon_download_timeout()))
.default_headers(_header_map())
.build()
.unwrap();
}

fn is_valid_domain(domain: &str) -> bool {
// Don't allow empty or too big domains or path traversal
if domain.is_empty() || domain.len() > 255 || domain.contains("..") {
return false;
}

// Only alphanumeric or specific characters
for c in domain.chars() {
if !c.is_alphanumeric() && !ALLOWED_CHARS.contains(c) {
return false;
}
}

true
}

#[get("/<domain>/icon.png")]
fn icon(domain: String) -> Content<Vec<u8>> {
let icon_type = ContentType::new("image", "x-icon");

// Validate the domain to avoid directory traversal attacks
if domain.contains('/') || domain.contains("..") {
if !is_valid_domain(&domain) {
warn!("Invalid domain: {:#?}", domain);
return Content(icon_type, FALLBACK_ICON.to_vec());
}

if let Some(blacklist) = CONFIG.icon_blacklist_regex() {
info!("Icon blacklist enabled: {:#?}", blacklist);

let regex = Regex::new(&blacklist).expect("Valid Regex");

if regex.is_match(&domain) {
warn!("Blacklisted domain: {:#?}", domain);
return Content(icon_type, FALLBACK_ICON.to_vec());
}
}

let icon = get_icon(&domain);

Content(icon_type, icon)
}

fn get_icon(domain: &str) -> Vec<u8> {
let path = format!("{}/{}.png", CONFIG.icon_cache_folder, domain);
let path = format!("{}/{}.png", CONFIG.icon_cache_folder(), domain);

if let Some(icon) = get_cached_icon(&path) {
return icon;
}

let url = get_icon_url(&domain);
if CONFIG.disable_icon_download() {
return FALLBACK_ICON.to_vec();
}

// Get the icon, or fallback in case of error
match download_icon(&url) {
match download_icon(&domain) {
Ok(icon) => {
save_icon(&path, &icon);
icon
@@ -77,7 +124,7 @@ fn get_cached_icon(path: &str) -> Option<Vec<u8>> {
None
}

fn file_is_expired(path: &str, ttl: u64) -> Result<bool, Box<Error>> {
fn file_is_expired(path: &str, ttl: u64) -> Result<bool, Error> {
let meta = symlink_metadata(path)?;
let modified = meta.modified()?;
let age = SystemTime::now().duration_since(modified)?;
@@ -87,7 +134,7 @@ fn file_is_expired(path: &str, ttl: u64) -> Result<bool, Box<Error>> {

fn icon_is_negcached(path: &str) -> bool {
let miss_indicator = path.to_owned() + ".miss";
let expired = file_is_expired(&miss_indicator, CONFIG.icon_cache_negttl);
let expired = file_is_expired(&miss_indicator, CONFIG.icon_cache_negttl());

match expired {
// No longer negatively cached, drop the marker
@@ -110,34 +157,238 @@ fn mark_negcache(path: &str) {
}

fn icon_is_expired(path: &str) -> bool {
let expired = file_is_expired(path, CONFIG.icon_cache_ttl);
let expired = file_is_expired(path, CONFIG.icon_cache_ttl());
expired.unwrap_or(true)
}

fn get_icon_url(domain: &str) -> String {
if CONFIG.local_icon_extractor {
format!("http://{}/favicon.ico", domain)
} else {
format!("https://icons.bitwarden.com/{}/icon.png", domain)
#[derive(Debug)]
struct Icon {
priority: u8,
href: String,
}

impl Icon {
fn new(priority: u8, href: String) -> Self {
Self { href, priority }
}
}

fn download_icon(url: &str) -> Result<Vec<u8>, reqwest::Error> {
info!("Downloading icon for {}...", url);
let mut res = reqwest::get(url)?;
/// Returns a Result with a tuple holding a Vec of Icons and a String with the cookies from the last response.
/// The icon list will always contain at least https://example.com/favicon.ico, possibly with an empty cookie string.
/// This does not mean that the location exists, but it is the default location browsers use.
///
/// # Argument
/// * `domain` - A string which holds the domain with extension.
///
/// # Example
/// ```
/// let (mut iconlist, cookie_str) = get_icon_url("github.com")?;
/// let (mut iconlist, cookie_str) = get_icon_url("gitlab.com")?;
/// ```
fn get_icon_url(domain: &str) -> Result<(Vec<Icon>, String), Error> {
// Default URL with secure and insecure schemes
let ssldomain = format!("https://{}", domain);
let httpdomain = format!("http://{}", domain);

res = res.error_for_status()?;
// Create the iconlist
let mut iconlist: Vec<Icon> = Vec::new();

let mut buffer: Vec<u8> = vec![];
res.copy_to(&mut buffer)?;
// Create the cookie_str to hold all the cookies from the response
// These cookies can be used to request/download the favicon image.
// Some sites have extra security in place with for example XSRF Tokens.
let mut cookie_str = String::new();

let resp = get_page(&ssldomain).or_else(|_| get_page(&httpdomain));
if let Ok(content) = resp {
// Extract the URL from the response in case redirects occurred (like @ gitlab.com)
let url = content.url().clone();
let raw_cookies = content.headers().get_all("set-cookie");
cookie_str = raw_cookies
.iter()
.filter_map(|raw_cookie| raw_cookie.to_str().ok())
.map(|cookie_str| {
if let Ok(cookie) = Cookie::parse(cookie_str) {
format!("{}={}; ", cookie.name(), cookie.value())
} else {
String::new()
}
})
.collect::<String>();

// Add the default favicon.ico to the list with the domain the content responded from.
iconlist.push(Icon::new(35, url.join("/favicon.ico").unwrap().into_string()));

let soup = Soup::from_reader(content)?;
// Search for and filter
let favicons = soup
.tag("link")
.attr("rel", Regex::new(r"icon$|apple.*icon")?) // Only use icon rels
.attr("href", Regex::new(r"(?i)\w+\.(jpg|jpeg|png|ico)(\?.*)?$")?) // Only allow specific extensions
.find_all();

// Loop through all the found icons and determine their priority
for favicon in favicons {
let sizes = favicon.get("sizes");
let href = favicon.get("href").expect("Missing href");
let full_href = url.join(&href).unwrap().into_string();

let priority = get_icon_priority(&full_href, sizes);

iconlist.push(Icon::new(priority, full_href))
}
} else {
// Add the default favicon.ico to the list with just the given domain
iconlist.push(Icon::new(35, format!("{}/favicon.ico", ssldomain)));
}

// Sort the iconlist by priority
iconlist.sort_by_key(|x| x.priority);

// There is always an icon in the list, so no need to check if it exists; just return the first one
Ok((iconlist, cookie_str))
}

fn get_page(url: &str) -> Result<Response, Error> {
get_page_with_cookies(url, "")
}

fn get_page_with_cookies(url: &str, cookie_str: &str) -> Result<Response, Error> {
CLIENT
.get(url)
.header("cookie", cookie_str)
.send()?
.error_for_status()
.map_err(Into::into)
}

/// Returns an integer with the priority of the icon type to prefer.
/// The lower the number the better.
///
/// # Arguments
/// * `href` - A string which holds the href value or relative path.
/// * `sizes` - The size of the icon if available as a <width>x<height> value like 32x32.
///
/// # Example
/// ```
/// priority1 = get_icon_priority("http://example.com/path/to/a/favicon.png", "32x32");
/// priority2 = get_icon_priority("https://example.com/path/to/a/favicon.ico", "");
/// ```
fn get_icon_priority(href: &str, sizes: Option<String>) -> u8 {
// Check if there is a dimension set
let (width, height) = parse_sizes(sizes);

// Check if there is a size given
if width != 0 && height != 0 {
// Only allow square dimensions
if width == height {
// Change priority by given size
if width == 32 {
1
} else if width == 64 {
2
} else if width >= 24 && width <= 128 {
3
} else if width == 16 {
4
} else {
5
}
// There are dimensions available, but the image is not a square
} else {
200
}
} else {
// Change priority by file extension
if href.ends_with(".png") {
10
} else if href.ends_with(".jpg") || href.ends_with(".jpeg") {
20
} else {
30
}
}
}

/// Returns a tuple with the width and height as separate values extracted from the sizes attribute.
/// It will return 0 for both values if no match has been found.
///
/// # Arguments
/// * `sizes` - The size of the icon if available as a <width>x<height> value like 32x32.
///
/// # Example
/// ```
/// let (width, height) = parse_sizes("64x64"); // (64, 64)
/// let (width, height) = parse_sizes("x128x128"); // (128, 128)
/// let (width, height) = parse_sizes("32"); // (3, 2)
/// ```
fn parse_sizes(sizes: Option<String>) -> (u16, u16) {
let mut width: u16 = 0;
let mut height: u16 = 0;

if let Some(sizes) = sizes {
match Regex::new(r"(?x)(\d+)\D*(\d+)").unwrap().captures(sizes.trim()) {
None => {}
Some(dimensions) => {
if dimensions.len() >= 3 {
width = dimensions[1].parse::<u16>().unwrap_or_default();
height = dimensions[2].parse::<u16>().unwrap_or_default();
}
}
}
}

(width, height)
}
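The sizes regex is worth a close look: `(\d+)\D*(\d+)` requires two digit groups but allows zero characters between them, so on a bare "32" the greedy first group backs off and the result is (3, 2) rather than no match. A quick standalone check of the behavior documented above:

// Quick check of the (\d+)\D*(\d+) behavior used by parse_sizes.
use regex::Regex;

fn main() {
    let re = Regex::new(r"(?x)(\d+)\D*(\d+)").unwrap();
    for input in &["64x64", "x128x128", "32"] {
        match re.captures(input) {
            Some(c) => println!("{:>8} -> ({}, {})", input, &c[1], &c[2]),
            None => println!("{:>8} -> no match", input),
        }
    }
    // Prints:
    //    64x64 -> (64, 64)
    // x128x128 -> (128, 128)
    //       32 -> (3, 2)
}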

fn download_icon(domain: &str) -> Result<Vec<u8>, Error> {
let (iconlist, cookie_str) = get_icon_url(&domain)?;

let mut buffer = Vec::new();

for icon in iconlist.iter().take(5) {
match get_page_with_cookies(&icon.href, &cookie_str) {
Ok(mut res) => {
info!("Downloaded icon from {}", icon.href);
res.copy_to(&mut buffer)?;
break;
}
Err(_) => info!("Download failed for {}", icon.href),
};
}

if buffer.is_empty() {
err!("Empty response")
}

Ok(buffer)
}

fn save_icon(path: &str, icon: &[u8]) {
create_dir_all(&CONFIG.icon_cache_folder).expect("Error creating icon cache");
create_dir_all(&CONFIG.icon_cache_folder()).expect("Error creating icon cache");

if let Ok(mut f) = File::create(path) {
f.write_all(icon).expect("Error writing icon file");
};
}

fn _header_map() -> HeaderMap {
// Set some default headers for the request.
// Use a browser-like user agent to make sure most websites return their correct website.
use reqwest::header::*;

macro_rules! headers {
($( $name:ident : $value:literal),+ $(,)? ) => {
let mut headers = HeaderMap::new();
$( headers.insert($name, HeaderValue::from_static($value)); )+
headers
};
}

headers! {
USER_AGENT: "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36 Edge/16.16299",
ACCEPT_LANGUAGE: "en-US,en;q=0.8",
CACHE_CONTROL: "no-cache",
PRAGMA: "no-cache",
ACCEPT: "text/html,application/xhtml+xml,application/xml; q=0.9,image/webp,image/apng,*/*;q=0.8",
}
}
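For readers unfamiliar with declarative macros, the `headers!` invocation above expands to roughly the following (shortened to two entries; the values are the same literals):

// Rough expansion of the headers! macro for two entries.
use reqwest::header::{HeaderMap, HeaderValue, ACCEPT_LANGUAGE, CACHE_CONTROL};

fn header_map() -> HeaderMap {
    let mut headers = HeaderMap::new();
    // from_static is fine here: the values are compile-time literals
    headers.insert(ACCEPT_LANGUAGE, HeaderValue::from_static("en-US,en;q=0.8"));
    headers.insert(CACHE_CONTROL, HeaderValue::from_static("no-cache"));
    headers
}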

@@ -9,12 +9,14 @@ use num_traits::FromPrimitive;
use crate::db::models::*;
use crate::db::DbConn;

use crate::util::{self, JsonMap};
use crate::util;

use crate::api::{ApiResult, EmptyResult, JsonResult};

use crate::auth::ClientIp;

use crate::mail;

use crate::CONFIG;

pub fn routes() -> Vec<Route> {
@@ -68,7 +70,7 @@ fn _refresh_login(data: ConnectData, conn: DbConn) -> JsonResult {
"expires_in": expires_in,
"token_type": "Bearer",
"refresh_token": device.refresh_token,
"Key": user.key,
"Key": user.akey,
"PrivateKey": user.private_key,
})))
}
@@ -99,26 +101,19 @@ fn _password_login(data: ConnectData, conn: DbConn, ip: ClientIp) -> JsonResult
)
}

// On iOS, device_type sends "iOS", on others it sends a number
let device_type = util::try_parse_string(data.device_type.as_ref()).unwrap_or(0);
let device_id = data.device_identifier.clone().expect("No device id provided");
let device_name = data.device_name.clone().expect("No device name provided");
let (mut device, new_device) = get_device(&data, &conn, &user);

// Find device or create new
let mut device = match Device::find_by_uuid(&device_id, &conn) {
Some(device) => {
// Check if owned device, and recreate if not
if device.user_uuid != user.uuid {
info!("Device exists but is owned by another user. The old device will be discarded");
Device::new(device_id, user.uuid.clone(), device_name, device_type)
} else {
device
let twofactor_token = twofactor_auth(&user.uuid, &data, &mut device, &conn)?;

if CONFIG.mail_enabled() && new_device {
if let Err(e) = mail::send_new_device_logged_in(&user.email, &ip.ip.to_string(), &device.updated_at, &device.name) {
error!("Error sending new device email: {:#?}", e);

if CONFIG.require_device_email() {
err!("Could not send login notification email. Please contact your administrator.")
}
}
None => Device::new(device_id, user.uuid.clone(), device_name, device_type),
};

let twofactor_token = twofactor_auth(&user.uuid, &data.clone(), &mut device, &conn)?;
}

// Common
let user = User::find_by_uuid(&device.user_uuid, &conn).unwrap();
@@ -132,9 +127,8 @@ fn _password_login(data: ConnectData, conn: DbConn, ip: ClientIp) -> JsonResult
"expires_in": expires_in,
"token_type": "Bearer",
"refresh_token": device.refresh_token,
"Key": user.key,
"Key": user.akey,
"PrivateKey": user.private_key,
//"TwoFactorToken": "11122233333444555666777888999"
});

if let Some(token) = twofactor_token {
@@ -145,72 +139,82 @@ fn _password_login(data: ConnectData, conn: DbConn, ip: ClientIp) -> JsonResult
Ok(Json(result))
}

/// Retrieves an existing device or creates a new device from ConnectData and the User
fn get_device(data: &ConnectData, conn: &DbConn, user: &User) -> (Device, bool) {
// On iOS, device_type sends "iOS", on others it sends a number
let device_type = util::try_parse_string(data.device_type.as_ref()).unwrap_or(0);
let device_id = data.device_identifier.clone().expect("No device id provided");
let device_name = data.device_name.clone().expect("No device name provided");

let mut new_device = false;
// Find device or create new
let device = match Device::find_by_uuid(&device_id, &conn) {
Some(device) => {
// Check if owned device, and recreate if not
if device.user_uuid != user.uuid {
info!("Device exists but is owned by another user. The old device will be discarded");
new_device = true;
Device::new(device_id, user.uuid.clone(), device_name, device_type)
} else {
device
}
}
None => {
new_device = true;
Device::new(device_id, user.uuid.clone(), device_name, device_type)
}
};

(device, new_device)
}

fn twofactor_auth(
user_uuid: &str,
data: &ConnectData,
device: &mut Device,
conn: &DbConn,
) -> ApiResult<Option<String>> {
let twofactors_raw = TwoFactor::find_by_user(user_uuid, conn);
// Remove u2f challenge twofactors (impl detail)
let twofactors: Vec<_> = twofactors_raw.iter().filter(|tf| tf.type_ < 1000).collect();

let providers: Vec<_> = twofactors.iter().map(|tf| tf.type_).collect();
let twofactors = TwoFactor::find_by_user(user_uuid, conn);

// No twofactor token if twofactor is disabled
if twofactors.is_empty() {
return Ok(None);
}

let provider = data.two_factor_provider.unwrap_or(providers[0]); // If we aren't given a two factor provider, assume the first one
let twofactor_ids: Vec<_> = twofactors.iter().map(|tf| tf.atype).collect();
let selected_id = data.two_factor_provider.unwrap_or(twofactor_ids[0]); // If we aren't given a two factor provider, assume the first one

let twofactor_code = match data.two_factor_token {
Some(ref code) => code,
None => err_json!(_json_err_twofactor(&providers, user_uuid, conn)?),
None => err_json!(_json_err_twofactor(&twofactor_ids, user_uuid, conn)?),
};

let twofactor = twofactors.iter().filter(|tf| tf.type_ == provider).nth(0);
let selected_twofactor = twofactors.into_iter().filter(|tf| tf.atype == selected_id).nth(0);

use crate::api::core::two_factor as _tf;
use crate::crypto::ct_eq;

let selected_data = _selected_data(selected_twofactor);
let mut remember = data.two_factor_remember.unwrap_or(0);

match TwoFactorType::from_i32(selected_id) {
Some(TwoFactorType::Authenticator) => _tf::validate_totp_code_str(twofactor_code, &selected_data?)?,
Some(TwoFactorType::U2f) => _tf::validate_u2f_login(user_uuid, twofactor_code, conn)?,
Some(TwoFactorType::YubiKey) => _tf::validate_yubikey_login(twofactor_code, &selected_data?)?,
Some(TwoFactorType::Duo) => _tf::validate_duo_login(data.username.as_ref().unwrap(), twofactor_code, conn)?,

match TwoFactorType::from_i32(provider) {
Some(TwoFactorType::Remember) => {
match device.twofactor_remember {
Some(ref remember) if remember == twofactor_code => return Ok(None), // No twofactor token needed here
_ => err_json!(_json_err_twofactor(&providers, user_uuid, conn)?),
Some(ref code) if !CONFIG.disable_2fa_remember() && ct_eq(code, twofactor_code) => {
remember = 1; // Make sure we also return the token here, otherwise it will only remember the first time
}
_ => err_json!(_json_err_twofactor(&twofactor_ids, user_uuid, conn)?),
}
}

Some(TwoFactorType::Authenticator) => {
let twofactor = match twofactor {
Some(tf) => tf,
None => err!("TOTP not enabled"),
};

let totp_code: u64 = match twofactor_code.parse() {
Ok(code) => code,
_ => err!("Invalid TOTP code"),
};

if !twofactor.check_totp_code(totp_code) {
err_json!(_json_err_twofactor(&providers, user_uuid, conn)?)
}
}

Some(TwoFactorType::U2f) => {
use crate::api::core::two_factor;

two_factor::validate_u2f_login(user_uuid, &twofactor_code, conn)?;
}

Some(TwoFactorType::YubiKey) => {
use crate::api::core::two_factor;

two_factor::validate_yubikey_login(user_uuid, twofactor_code, conn)?;
}

_ => err!("Invalid two factor provider"),
}

if data.two_factor_remember.unwrap_or(0) == 1 {
if !CONFIG.disable_2fa_remember() && remember == 1 {
Ok(Some(device.refresh_twofactor_remember()))
} else {
device.delete_twofactor_remember();
@@ -218,6 +222,13 @@ fn twofactor_auth(
}
}
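The switch from plain `==` to `ct_eq` for the remember token is a timing-safe comparison; the helper lives in crate::crypto and is not shown in this diff. The idea is a comparison whose duration does not depend on where the first mismatch occurs, so the check leaks no information through response timing. A standalone sketch of the technique:

// A minimal constant-time comparison in the spirit of ct_eq (sketch,
// assuming both inputs are compared as bytes).
fn ct_eq(a: &str, b: &str) -> bool {
    let (a, b) = (a.as_bytes(), b.as_bytes());
    if a.len() != b.len() {
        return false;
    }
    // Accumulate differences instead of returning early, so the run time
    // does not depend on the position of the first mismatch.
    let mut diff = 0u8;
    for (x, y) in a.iter().zip(b.iter()) {
        diff |= x ^ y;
    }
    diff == 0
}

fn main() {
    assert!(ct_eq("token", "token"));
    assert!(!ct_eq("token", "tokex"));
}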

fn _selected_data(tf: Option<TwoFactor>) -> ApiResult<String> {
match tf {
Some(tf) => Ok(tf.data),
None => err!("Two factor doesn't exist"),
}
}
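Note how `_selected_data` is used in `twofactor_auth`: the Result is built eagerly, but `selected_data?` only propagates the "Two factor doesn't exist" error inside the match arms that actually need the stored data (TOTP, YubiKey), while the U2F and Duo arms never touch it. A minimal sketch of the same pattern, with types simplified:

// Sketch: build the Result early, propagate the error only where needed.
fn selected_data(tf: Option<String>) -> Result<String, &'static str> {
    tf.ok_or("Two factor doesn't exist")
}

fn validate(kind: u8, tf: Option<String>) -> Result<(), &'static str> {
    let data = selected_data(tf); // not an error yet, even when tf is None
    match kind {
        0 => {
            let _totp_secret = data?; // this arm needs the stored data
        }
        1 => { /* U2F-style arm: looks things up elsewhere, `data` unused */ }
        _ => return Err("Invalid two factor provider"),
    }
    Ok(())
}

fn main() {
    assert!(validate(1, None).is_ok());  // no data needed, no error
    assert!(validate(0, None).is_err()); // data needed and missing
}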

fn _json_err_twofactor(providers: &[i32], user_uuid: &str, conn: &DbConn) -> ApiResult<Value> {
use crate::api::core::two_factor;

@@ -234,27 +245,38 @@ fn _json_err_twofactor(providers: &[i32], user_uuid: &str, conn: &DbConn) -> Api
match TwoFactorType::from_i32(*provider) {
Some(TwoFactorType::Authenticator) => { /* Nothing to do for TOTP */ }

Some(TwoFactorType::U2f) if CONFIG.domain_set => {
Some(TwoFactorType::U2f) if CONFIG.domain_set() => {
let request = two_factor::generate_u2f_login(user_uuid, conn)?;
let mut challenge_list = Vec::new();

for key in request.registered_keys {
let mut challenge_map = JsonMap::new();

challenge_map.insert("appId".into(), Value::String(request.app_id.clone()));
challenge_map.insert("challenge".into(), Value::String(request.challenge.clone()));
challenge_map.insert("version".into(), Value::String(key.version));
challenge_map.insert("keyHandle".into(), Value::String(key.key_handle.unwrap_or_default()));

challenge_list.push(Value::Object(challenge_map));
challenge_list.push(json!({
"appId": request.app_id,
"challenge": request.challenge,
"version": key.version,
"keyHandle": key.key_handle,
}));
}

let mut map = JsonMap::new();
use serde_json;
let challenge_list_str = serde_json::to_string(&challenge_list).unwrap();

map.insert("Challenges".into(), Value::String(challenge_list_str));
result["TwoFactorProviders2"][provider.to_string()] = Value::Object(map);
result["TwoFactorProviders2"][provider.to_string()] = json!({
"Challenges": challenge_list_str,
});
}

Some(TwoFactorType::Duo) => {
let email = match User::find_by_uuid(user_uuid, &conn) {
Some(u) => u.email,
None => err!("User does not exist"),
};

let (signature, host) = two_factor::generate_duo_signature(&email, conn)?;

result["TwoFactorProviders2"][provider.to_string()] = json!({
"Host": host,
"Signature": signature,
});
}

Some(tf_type @ TwoFactorType::YubiKey) => {
@@ -263,12 +285,11 @@ fn _json_err_twofactor(providers: &[i32], user_uuid: &str, conn: &DbConn) -> Api
None => err!("No YubiKey devices registered"),
};

let yubikey_metadata: two_factor::YubikeyMetadata =
serde_json::from_str(&twofactor.data).expect("Can't parse Yubikey Metadata");
let yubikey_metadata: two_factor::YubikeyMetadata = serde_json::from_str(&twofactor.data)?;

let mut map = JsonMap::new();
map.insert("Nfc".into(), Value::Bool(yubikey_metadata.Nfc));
result["TwoFactorProviders2"][provider.to_string()] = Value::Object(map);
result["TwoFactorProviders2"][provider.to_string()] = json!({
"Nfc": yubikey_metadata.Nfc,
})
}

_ => {}

@@ -23,6 +23,7 @@ pub type EmptyResult = ApiResult<()>;

use crate::util;
type JsonUpcase<T> = Json<util::UpCase<T>>;
type JsonUpcaseVec<T> = Json<Vec<util::UpCase<T>>>;

// Common structs representing JSON data received
#[derive(Deserialize)]
@@ -46,10 +47,13 @@ impl NumberOrString {
}
}

fn into_i32(self) -> Option<i32> {
fn into_i32(self) -> ApiResult<i32> {
use std::num::ParseIntError as PIE;
match self {
NumberOrString::Number(n) => Some(n),
NumberOrString::String(s) => s.parse().ok(),
NumberOrString::Number(n) => Ok(n),
NumberOrString::String(s) => s
.parse()
.map_err(|e: PIE| crate::Error::new("Can't convert to number", e.to_string())),
}
}
}
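NumberOrString itself is declared earlier in the file; only its impl appears in the hunk. Judging by its use, it behaves like an untagged serde enum, where a JSON field may arrive as either 7 or "7". A sketch under that assumption (the `#[serde(untagged)]` attribute is not shown in the diff):

// Sketch: a field that may arrive as a number or a string.
use serde::Deserialize;

#[derive(Deserialize, Debug)]
#[serde(untagged)]
enum NumberOrString {
    Number(i32),
    String(String),
}

fn main() {
    // Both inputs deserialize without errors.
    let n: NumberOrString = serde_json::from_str("7").unwrap();
    let s: NumberOrString = serde_json::from_str("\"7\"").unwrap();
    println!("{:?} {:?}", n, s);
}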

@@ -25,7 +25,7 @@ fn negotiate(_headers: Headers, _conn: DbConn) -> JsonResult {
let conn_id = BASE64URL.encode(&crypto::get_random(vec![0u8; 16]));
let mut available_transports: Vec<JsonValue> = Vec::new();

if CONFIG.websocket_enabled {
if CONFIG.websocket_enabled() {
available_transports.push(json!({"transport":"WebSockets", "transferFormats":["Text","Binary"]}));
}

@@ -88,13 +88,10 @@ fn serialize(val: Value) -> Vec<u8> {

fn serialize_date(date: NaiveDateTime) -> Value {
let seconds: i64 = date.timestamp();
let nanos: i64 = date.timestamp_subsec_nanos() as i64;
let nanos: i64 = date.timestamp_subsec_nanos().into();
let timestamp = nanos << 34 | seconds;

use byteorder::{BigEndian, WriteBytesExt};

let mut bs = [0u8; 8];
bs.as_mut().write_i64::<BigEndian>(timestamp).expect("Unable to write");
let bs = timestamp.to_be_bytes();

// -1 is Timestamp
// https://github.com/msgpack/msgpack/blob/master/spec.md#timestamp-extension-type
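The bit-packing above follows the msgpack timestamp extension linked in the comment: 30 bits of nanoseconds in the high bits, 34 bits of seconds in the low bits, serialized big-endian. The replacement of byteorder with `to_be_bytes` keeps the same layout. Isolated:

// The msgpack timestamp-64 layout: nanos in bits 34..63, seconds in bits 0..33.
fn pack_timestamp(seconds: i64, nanos: i64) -> [u8; 8] {
    let timestamp = nanos << 34 | seconds;
    timestamp.to_be_bytes()
}

fn main() {
    // 1 second, 1 nanosecond
    let bs = pack_timestamp(1, 1);
    assert_eq!(u64::from_be_bytes(bs), (1u64 << 34) | 1);
}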

@@ -138,7 +135,7 @@ impl Handler for WSHandler {

// Validate the user
use crate::auth;
let claims = match auth::decode_jwt(access_token) {
let claims = match auth::decode_login(access_token) {
Ok(claims) => claims,
Err(_) => return Err(ws::Error::new(ws::ErrorKind::Internal, "Invalid access token provided")),
};
@@ -233,7 +230,7 @@ pub struct WebSocketUsers {
}

impl WebSocketUsers {
fn send_update(&self, user_uuid: &String, data: &[u8]) -> ws::Result<()> {
fn send_update(&self, user_uuid: &str, data: &[u8]) -> ws::Result<()> {
if let Some(user) = self.map.get(user_uuid) {
for sender in user.iter() {
sender.send(data)?;
@@ -243,7 +240,6 @@ impl WebSocketUsers {
}

// NOTE: The last modified date needs to be updated before calling these methods
#[allow(dead_code)]
pub fn send_user_update(&self, ut: UpdateType, user: &User) {
let data = create_update(
vec![
@@ -253,7 +249,7 @@ impl WebSocketUsers {
ut,
);

self.send_update(&user.uuid.clone(), &data).ok();
self.send_update(&user.uuid, &data).ok();
}

pub fn send_folder_update(&self, ut: UpdateType, folder: &Folder) {
@@ -328,6 +324,7 @@ fn create_ping() -> Vec<u8> {
}

#[allow(dead_code)]
#[derive(PartialEq)]
pub enum UpdateType {
CipherUpdate = 0,
CipherCreate = 1,
@@ -343,6 +340,8 @@ pub enum UpdateType {
SyncSettings = 10,

LogOut = 11,

None = 100,
}

use rocket::State;
@@ -352,9 +351,12 @@ pub fn start_notification_server() -> WebSocketUsers {
let factory = WSFactory::init();
let users = factory.users.clone();

if CONFIG.websocket_enabled {
if CONFIG.websocket_enabled() {
thread::spawn(move || {
WebSocket::new(factory).unwrap().listen(&CONFIG.websocket_url).unwrap();
WebSocket::new(factory)
.unwrap()
.listen((CONFIG.websocket_address().as_str(), CONFIG.websocket_port()))
.unwrap();
});
}

@@ -2,18 +2,19 @@ use std::io;
use std::path::{Path, PathBuf};

use rocket::http::ContentType;
use rocket::request::Request;
use rocket::response::content::Content;
use rocket::response::{self, NamedFile, Responder};
use rocket::response::NamedFile;
use rocket::Route;
use rocket_contrib::json::Json;
use serde_json::Value;

use crate::util::Cached;
use crate::error::Error;
use crate::CONFIG;

pub fn routes() -> Vec<Route> {
if CONFIG.web_vault_enabled {
routes![web_index, app_id, web_files, admin_page, attachments, alive]
if CONFIG.web_vault_enabled() {
routes![web_index, app_id, web_files, attachments, alive, images]
} else {
routes![attachments, alive]
}
@@ -21,7 +22,9 @@ pub fn routes() -> Vec<Route> {

#[get("/")]
fn web_index() -> Cached<io::Result<NamedFile>> {
Cached::short(NamedFile::open(Path::new(&CONFIG.web_vault_folder).join("index.html")))
Cached::short(NamedFile::open(
Path::new(&CONFIG.web_vault_folder()).join("index.html"),
))
}

#[get("/app-id.json")]
@@ -35,7 +38,7 @@ fn app_id() -> Cached<Content<Json<Value>>> {
{
"version": { "major": 1, "minor": 0 },
"ids": [
&CONFIG.domain,
&CONFIG.domain(),
"ios:bundle-id:com.8bit.bitwarden",
"android:apk-key-hash:dUGFzUzf3lmHSLBDBIv+WaFyZMI" ]
}]
@@ -43,55 +46,14 @@ fn app_id() -> Cached<Content<Json<Value>>> {
))
}

const ADMIN_PAGE: &'static str = include_str!("../static/admin.html");
use rocket::response::content::Html;

#[get("/admin")]
fn admin_page() -> Cached<Html<&'static str>> {
Cached::short(Html(ADMIN_PAGE))
}

/* // Use this during Admin page development
#[get("/admin")]
fn admin_page() -> Cached<io::Result<NamedFile>> {
Cached::short(NamedFile::open("src/static/admin.html"))
}
*/

#[get("/<p..>", rank = 1)] // Only match this if the other routes don't match
#[get("/<p..>", rank = 10)] // Only match this if the other routes don't match
fn web_files(p: PathBuf) -> Cached<io::Result<NamedFile>> {
Cached::long(NamedFile::open(Path::new(&CONFIG.web_vault_folder).join(p)))
}

struct Cached<R>(R, &'static str);

impl<R> Cached<R> {
fn long(r: R) -> Cached<R> {
// 7 days
Cached(r, "public, max-age=604800")
}

fn short(r: R) -> Cached<R> {
// 10 minutes
Cached(r, "public, max-age=600")
}
}

impl<'r, R: Responder<'r>> Responder<'r> for Cached<R> {
fn respond_to(self, req: &Request) -> response::Result<'r> {
match self.0.respond_to(req) {
Ok(mut res) => {
res.set_raw_header("Cache-Control", self.1);
Ok(res)
}
e @ Err(_) => e,
}
}
Cached::long(NamedFile::open(Path::new(&CONFIG.web_vault_folder()).join(p)))
}

#[get("/attachments/<uuid>/<file..>")]
fn attachments(uuid: String, file: PathBuf) -> io::Result<NamedFile> {
NamedFile::open(Path::new(&CONFIG.attachments_folder).join(uuid).join(file))
NamedFile::open(Path::new(&CONFIG.attachments_folder()).join(uuid).join(file))
}

#[get("/alive")]
@@ -101,3 +63,13 @@ fn alive() -> Json<String> {

Json(format_date(&Utc::now().naive_utc()))
}

#[get("/bwrs_images/<filename>")]
fn images(filename: String) -> Result<Content<&'static [u8]>, Error> {
match filename.as_ref() {
"mail-github.png" => Ok(Content(ContentType::PNG, include_bytes!("../static/images/mail-github.png"))),
"logo-gray.png" => Ok(Content(ContentType::PNG, include_bytes!("../static/images/logo-gray.png"))),
"error-x.svg" => Ok(Content(ContentType::SVG, include_bytes!("../static/images/error-x.svg"))),
_ => err!("Image not found"),
}
}
94 src/auth.rs
@@ -5,6 +5,7 @@ use crate::util::read_file;
use chrono::{Duration, Utc};

use jsonwebtoken::{self, Algorithm, Header};
use serde::de::DeserializeOwned;
use serde::ser::Serialize;

use crate::error::{Error, MapResult};
@@ -14,21 +15,17 @@ const JWT_ALGORITHM: Algorithm = Algorithm::RS256;

lazy_static! {
pub static ref DEFAULT_VALIDITY: Duration = Duration::hours(2);
pub static ref JWT_ISSUER: String = CONFIG.domain.clone();
static ref JWT_HEADER: Header = Header::new(JWT_ALGORITHM);
static ref PRIVATE_RSA_KEY: Vec<u8> = match read_file(&CONFIG.private_rsa_key) {
pub static ref JWT_LOGIN_ISSUER: String = format!("{}|login", CONFIG.domain());
pub static ref JWT_INVITE_ISSUER: String = format!("{}|invite", CONFIG.domain());
pub static ref JWT_ADMIN_ISSUER: String = format!("{}|admin", CONFIG.domain());
static ref PRIVATE_RSA_KEY: Vec<u8> = match read_file(&CONFIG.private_rsa_key()) {
Ok(key) => key,
Err(e) => panic!(
"Error loading private RSA Key from {}\n Error: {}",
CONFIG.private_rsa_key, e
),
Err(e) => panic!("Error loading private RSA Key.\n Error: {}", e),
};
static ref PUBLIC_RSA_KEY: Vec<u8> = match read_file(&CONFIG.public_rsa_key) {
static ref PUBLIC_RSA_KEY: Vec<u8> = match read_file(&CONFIG.public_rsa_key()) {
Ok(key) => key,
Err(e) => panic!(
"Error loading public RSA Key from {}\n Error: {}",
CONFIG.public_rsa_key, e
),
Err(e) => panic!("Error loading public RSA Key.\n Error: {}", e),
};
}

@@ -39,14 +36,13 @@ pub fn encode_jwt<T: Serialize>(claims: &T) -> String {
}
}

pub fn decode_jwt(token: &str) -> Result<JWTClaims, Error> {
fn decode_jwt<T: DeserializeOwned>(token: &str, issuer: String) -> Result<T, Error> {
let validation = jsonwebtoken::Validation {
leeway: 30, // 30 seconds
validate_exp: true,
validate_iat: false, // IssuedAt is the same as NotBefore
validate_nbf: true,
aud: None,
iss: Some(JWT_ISSUER.clone()),
iss: Some(issuer),
sub: None,
algorithms: vec![JWT_ALGORITHM],
};
@@ -55,30 +51,23 @@ pub fn decode_jwt(token: &str) -> Result<JWTClaims, Error> {

jsonwebtoken::decode(&token, &PUBLIC_RSA_KEY, &validation)
.map(|d| d.claims)
.map_res("Error decoding login JWT")
.map_res("Error decoding JWT")
}

pub fn decode_invite_jwt(token: &str) -> Result<InviteJWTClaims, Error> {
let validation = jsonwebtoken::Validation {
leeway: 30, // 30 seconds
validate_exp: true,
validate_iat: false, // IssuedAt is the same as NotBefore
validate_nbf: true,
aud: None,
iss: Some(JWT_ISSUER.clone()),
sub: None,
algorithms: vec![JWT_ALGORITHM],
};
pub fn decode_login(token: &str) -> Result<LoginJWTClaims, Error> {
decode_jwt(token, JWT_LOGIN_ISSUER.to_string())
}

let token = token.replace(char::is_whitespace, "");
pub fn decode_invite(token: &str) -> Result<InviteJWTClaims, Error> {
decode_jwt(token, JWT_INVITE_ISSUER.to_string())
}

jsonwebtoken::decode(&token, &PUBLIC_RSA_KEY, &validation)
.map(|d| d.claims)
.map_res("Error decoding invite JWT")
pub fn decode_admin(token: &str) -> Result<AdminJWTClaims, Error> {
decode_jwt(token, JWT_ADMIN_ISSUER.to_string())
}

#[derive(Debug, Serialize, Deserialize)]
pub struct JWTClaims {
pub struct LoginJWTClaims {
// Not before
pub nbf: i64,
// Expiration time
@@ -125,17 +114,18 @@ pub struct InviteJWTClaims {
pub invited_by_email: Option<String>,
}

pub fn generate_invite_claims(uuid: String,
email: String,
org_id: Option<String>,
org_user_id: Option<String>,
invited_by_email: Option<String>,
pub fn generate_invite_claims(
uuid: String,
email: String,
org_id: Option<String>,
org_user_id: Option<String>,
invited_by_email: Option<String>,
) -> InviteJWTClaims {
let time_now = Utc::now().naive_utc();
InviteJWTClaims {
nbf: time_now.timestamp(),
exp: (time_now + Duration::days(5)).timestamp(),
iss: JWT_ISSUER.to_string(),
iss: JWT_INVITE_ISSUER.to_string(),
sub: uuid.clone(),
email: email.clone(),
org_id: org_id.clone(),
@@ -144,6 +134,28 @@ pub fn generate_invite_claims(uuid: String,
}
}

#[derive(Debug, Serialize, Deserialize)]
pub struct AdminJWTClaims {
// Not before
pub nbf: i64,
// Expiration time
pub exp: i64,
// Issuer
pub iss: String,
// Subject
pub sub: String,
}

pub fn generate_admin_claims() -> AdminJWTClaims {
let time_now = Utc::now().naive_utc();
AdminJWTClaims {
nbf: time_now.timestamp(),
exp: (time_now + Duration::minutes(20)).timestamp(),
iss: JWT_ADMIN_ISSUER.to_string(),
sub: "admin_panel".to_string(),
}
}
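The refactor above folds three near-identical decoders into one generic `decode_jwt<T: DeserializeOwned>`, with the expected issuer passed in so a login token cannot pass validation as an invite or admin token. The generic part, reduced to plain JSON parsing as a sketch (the JWT machinery is stripped out):

// Sketch: one generic parser produces whichever claims struct the caller asks for.
use serde::de::DeserializeOwned;
use serde::Deserialize;

fn parse_claims<T: DeserializeOwned>(json: &str) -> Result<T, serde_json::Error> {
    serde_json::from_str(json)
}

#[derive(Deserialize, Debug)]
struct AdminClaims {
    iss: String,
    sub: String,
}

fn main() {
    let c: AdminClaims = parse_claims(r#"{"iss":"example|admin","sub":"admin_panel"}"#).unwrap();
    println!("{:?}", c);
}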

//
// Bearer token authentication
//
@@ -166,8 +178,8 @@ impl<'a, 'r> FromRequest<'a, 'r> for Headers {
let headers = request.headers();

// Get host
let host = if CONFIG.domain_set {
CONFIG.domain.clone()
let host = if CONFIG.domain_set() {
CONFIG.domain()
} else if let Some(referer) = headers.get_one("Referer") {
referer.to_string()
} else {
@@ -203,7 +215,7 @@ impl<'a, 'r> FromRequest<'a, 'r> for Headers {
};

// Check JWT token is valid and get device and user from it
let claims: JWTClaims = match decode_jwt(access_token) {
let claims = match decode_login(access_token) {
Ok(claims) => claims,
Err(_) => err_handler!("Invalid claim"),
};
@@ -274,7 +286,7 @@ impl<'a, 'r> FromRequest<'a, 'r> for OrgHeaders {
device: headers.device,
user,
org_user_type: {
if let Some(org_usr_type) = UserOrgType::from_i32(org_user.type_) {
if let Some(org_usr_type) = UserOrgType::from_i32(org_user.atype) {
org_usr_type
} else {
// This should only happen if the DB is corrupted
605 src/config.rs (new file)
@@ -0,0 +1,605 @@
use std::process::exit;
use std::sync::RwLock;

use crate::error::Error;
use crate::util::get_env;

lazy_static! {
pub static ref CONFIG: Config = Config::load().unwrap_or_else(|e| {
println!("Error loading config:\n\t{:?}\n", e);
exit(12)
});
pub static ref CONFIG_FILE: String = {
let data_folder = get_env("DATA_FOLDER").unwrap_or_else(|| String::from("data"));
get_env("CONFIG_FILE").unwrap_or_else(|| format!("{}/config.json", data_folder))
};
}

pub type Pass = String;

macro_rules! make_config {
($(
$(#[doc = $groupdoc:literal])?
$group:ident $(: $group_enabled:ident)? {
$(
$(#[doc = $doc:literal])+
$name:ident : $ty:ty, $editable:literal, $none_action:ident $(, $default:expr)?;
)+},
)+) => {
pub struct Config { inner: RwLock<Inner> }

struct Inner {
templates: Handlebars,
config: ConfigItems,

_env: ConfigBuilder,
_usr: ConfigBuilder,
}

#[derive(Debug, Clone, Default, Deserialize, Serialize)]
pub struct ConfigBuilder {
$($(
#[serde(skip_serializing_if = "Option::is_none")]
$name: Option<$ty>,
)+)+
}

impl ConfigBuilder {
fn from_env() -> Self {
dotenv::from_path(".env").ok();

let mut builder = ConfigBuilder::default();
$($(
builder.$name = get_env(&stringify!($name).to_uppercase());
)+)+

builder
}

fn from_file(path: &str) -> Result<Self, Error> {
use crate::util::read_file_string;
let config_str = read_file_string(path)?;
serde_json::from_str(&config_str).map_err(Into::into)
}

/// Merges the values of both builders into a new builder.
/// If both have the same element, `other` wins.
fn merge(&self, other: &Self, show_overrides: bool) -> Self {
let mut overrides = Vec::new();
let mut builder = self.clone();
$($(
if let v @Some(_) = &other.$name {
builder.$name = v.clone();

if self.$name.is_some() {
overrides.push(stringify!($name).to_uppercase());
}
}
)+)+

if show_overrides && !overrides.is_empty() {
// We can't use warn! here because logging isn't set up yet.
println!("[WARNING] The following environment variables are being overridden by the config file,");
println!("[WARNING] please use the admin panel to make changes to them:");
println!("[WARNING] {}\n", overrides.join(", "));
}

builder
}

/// Returns a new builder with all the elements from self,
/// except those that are equal on both sides
fn _remove(&self, other: &Self) -> Self {
let mut builder = ConfigBuilder::default();
$($(
if &self.$name != &other.$name {
builder.$name = self.$name.clone();
}

)+)+
builder
}

fn build(&self) -> ConfigItems {
let mut config = ConfigItems::default();
let _domain_set = self.domain.is_some();
$($(
config.$name = make_config!{ @build self.$name.clone(), &config, $none_action, $($default)? };
)+)+
config.domain_set = _domain_set;

config
}
}

#[derive(Debug, Clone, Default)]
pub struct ConfigItems { $($(pub $name: make_config!{@type $ty, $none_action}, )+)+ }

#[allow(unused)]
impl Config {
$($(
pub fn $name(&self) -> make_config!{@type $ty, $none_action} {
self.inner.read().unwrap().config.$name.clone()
}
)+)+

pub fn prepare_json(&self) -> serde_json::Value {
let (def, cfg) = {
let inner = &self.inner.read().unwrap();
(inner._env.build(), inner.config.clone())
};

fn _get_form_type(rust_type: &str) -> &'static str {
match rust_type {
"Pass" => "password",
"String" => "text",
"bool" => "checkbox",
_ => "number"
}
}

fn _get_doc(doc: &str) -> serde_json::Value {
let mut split = doc.split("|>").map(str::trim);
json!({
"name": split.next(),
"description": split.next()
})
}

json!([ $({
"group": stringify!($group),
"grouptoggle": stringify!($($group_enabled)?),
"groupdoc": make_config!{ @show $($groupdoc)? },
"elements": [
$( {
"editable": $editable,
"name": stringify!($name),
"value": cfg.$name,
"default": def.$name,
"type": _get_form_type(stringify!($ty)),
"doc": _get_doc(concat!($($doc),+)),
}, )+
]}, )+ ])
}
}
};

// Group or empty string
( @show ) => { "" };
( @show $lit:literal ) => { $lit };

// Wrap the optionals in an Option type
( @type $ty:ty, option) => { Option<$ty> };
( @type $ty:ty, $id:ident) => { $ty };

// Generate the values depending on none_action
( @build $value:expr, $config:expr, option, ) => { $value };
( @build $value:expr, $config:expr, def, $default:expr ) => { $value.unwrap_or($default) };
( @build $value:expr, $config:expr, auto, $default_fn:expr ) => {{
match $value {
Some(v) => v,
None => {
let f: &dyn Fn(&ConfigItems) -> _ = &$default_fn;
f($config)
}
}
}};
}
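One subtlety in the `@build ... auto` arm above: the closure passed as `$default_fn` gets its argument type from the `&dyn Fn(&ConfigItems) -> _` annotation, since a bare closure dropped into a macro expansion has no other context to infer it from. The same coercion in isolation (names here are illustrative):

// Sketch: pinning a closure's argument type via a &dyn Fn annotation.
struct Items {
    data_folder: String,
}

fn main() {
    let value: Option<String> = None;
    let items = Items { data_folder: "data".into() };

    let resolved = match value {
        Some(v) => v,
        None => {
            // Without the &dyn Fn annotation, the closure's parameter type
            // would have nothing to be inferred from in the macro expansion.
            let f: &dyn Fn(&Items) -> String = &|c| format!("{}/db.sqlite3", c.data_folder);
            f(&items)
        }
    };
    assert_eq!(resolved, "data/db.sqlite3");
}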

// STRUCTURE:
// /// Short description (without this they won't appear on the list)
// group {
// /// Friendly Name |> Description (Optional)
// name: type, is_editable, none_action, <default_value (Optional)>
// }
//
// Where none_action applies when the value wasn't provided and can be:
// def: Use a default value
// auto: Value is auto generated based on other values
// option: Value is optional
make_config! {
folders {
/// Data folder |> Main data folder
data_folder: String, false, def, "data".to_string();
/// Database URL
database_url: String, false, auto, |c| format!("{}/{}", c.data_folder, "db.sqlite3");
/// Icon cache folder
icon_cache_folder: String, false, auto, |c| format!("{}/{}", c.data_folder, "icon_cache");
/// Attachments folder
attachments_folder: String, false, auto, |c| format!("{}/{}", c.data_folder, "attachments");
/// Templates folder
templates_folder: String, false, auto, |c| format!("{}/{}", c.data_folder, "templates");
/// Session JWT key
rsa_key_filename: String, false, auto, |c| format!("{}/{}", c.data_folder, "rsa_key");
/// Web vault folder
web_vault_folder: String, false, def, "web-vault/".to_string();
},
ws {
/// Enable websocket notifications
websocket_enabled: bool, false, def, false;
/// Websocket address
websocket_address: String, false, def, "0.0.0.0".to_string();
/// Websocket port
websocket_port: u16, false, def, 3012;
},

/// General settings
settings {
/// Domain URL |> This needs to be set to the URL used to access the server, including 'http[s]://'
/// and port, if it's different from the default. Some server functions don't work correctly without this value
domain: String, true, def, "http://localhost".to_string();
/// Domain Set |> Indicates if the domain is set by the admin. Otherwise the default will be used.
domain_set: bool, false, def, false;
/// Enable web vault
web_vault_enabled: bool, false, def, true;

/// HIBP Api Key |> HaveIBeenPwned API Key, request it here: https://haveibeenpwned.com/API/Key
hibp_api_key: Pass, true, option;

/// Disable icon downloads |> Set to true to disable icon downloading; this would still serve icons from
/// $ICON_CACHE_FOLDER, but it won't produce any external network requests. You also need to set $ICON_CACHE_TTL to 0,
/// otherwise it will delete them and they won't be downloaded again.
disable_icon_download: bool, true, def, false;
/// Allow new signups |> Controls if new users can register. Note that while this is disabled, users could still be invited
signups_allowed: bool, true, def, true;
/// Allow invitations |> Controls whether users can be invited by organization admins, even when signups are disabled
invitations_allowed: bool, true, def, true;
/// Password iterations |> Number of server-side password hashing iterations.
/// The changes only apply when a user changes their password. Not recommended to lower the value
password_iterations: i32, true, def, 100_000;
/// Show password hints |> Controls if the password hint should be shown directly in the web page.
/// Otherwise, if email is disabled, there is no way to see the password hint
show_password_hint: bool, true, def, true;

/// Admin page token |> The token used to authenticate in this very same page. Changing it here won't deauthorize the current session
admin_token: Pass, true, option;
},

/// Advanced settings
advanced {
/// Positive icon cache expiry |> Number of seconds to consider that an already cached icon is fresh. After this period, the icon will be redownloaded
icon_cache_ttl: u64, true, def, 2_592_000;
/// Negative icon cache expiry |> Number of seconds before retrying an icon download that failed.
icon_cache_negttl: u64, true, def, 259_200;
/// Icon download timeout |> Number of seconds after which an icon download attempt is abandoned.
icon_download_timeout: u64, true, def, 10;
/// Icon blacklist Regex |> Any domains or IPs that match this regex won't be fetched by the icon service.
/// Useful to hide other servers in the local network. Check the WIKI for more details
icon_blacklist_regex: String, true, option;

/// Disable Two-Factor remember |> Enabling this would force the users to use a second factor to log in every time.
/// Note that the checkbox would still be present, but ignored.
disable_2fa_remember: bool, true, def, false;

/// Require new device emails |> When a user logs in, an email is required to be sent.
/// If sending the email fails, the login attempt will fail.
require_device_email: bool, true, def, false;

/// Reload templates (Dev) |> When this is set to true, the templates get reloaded with every request.
/// ONLY use this during development, as it can slow down the server
reload_templates: bool, true, def, false;

/// Log routes at launch (Dev)
log_mounts: bool, true, def, false;
/// Enable extended logging
extended_logging: bool, false, def, true;
/// Enable the log to output to Syslog
use_syslog: bool, false, def, false;
/// Log file path
log_file: String, false, option;
/// Log level
log_level: String, false, def, "Info".to_string();

/// Enable DB WAL |> Turning this off might lead to worse performance, but might help if using bitwarden_rs on some exotic filesystems
/// that do not support WAL. Please make sure you read the project wiki on the topic before changing this setting.
enable_db_wal: bool, false, def, true;

/// Disable Admin Token (Know the risks!) |> Disables the Admin Token for the admin page so you may use your own auth in front
disable_admin_token: bool, true, def, false;
},

/// Yubikey settings
yubico: _enable_yubico {
/// Enabled
_enable_yubico: bool, true, def, true;
/// Client ID
yubico_client_id: String, true, option;
/// Secret Key
yubico_secret_key: Pass, true, option;
/// Server
yubico_server: String, true, option;
},

/// Global Duo settings (Note that users can override them)
duo: _enable_duo {
/// Enabled
_enable_duo: bool, true, def, false;
/// Integration Key
duo_ikey: String, true, option;
/// Secret Key
duo_skey: Pass, true, option;
/// Host
duo_host: String, true, option;
/// Application Key (generated automatically)
_duo_akey: Pass, false, option;
},

/// SMTP Email Settings
smtp: _enable_smtp {
/// Enabled
_enable_smtp: bool, true, def, true;
/// Host
smtp_host: String, true, option;
/// Enable SSL
smtp_ssl: bool, true, def, true;
/// Use explicit TLS |> Enabling this would force the use of an explicit TLS connection, instead of upgrading an insecure one with STARTTLS
smtp_explicit_tls: bool, true, def, false;
/// Port
smtp_port: u16, true, auto, |c| if c.smtp_explicit_tls {465} else if c.smtp_ssl {587} else {25};
/// From Address
smtp_from: String, true, def, String::new();
/// From Name
smtp_from_name: String, true, def, "Bitwarden_RS".to_string();
/// Username
smtp_username: String, true, option;
/// Password
smtp_password: Pass, true, option;
/// Json form auth mechanism |> Defaults for SSL are "Plain" and "Login", and nothing for non-SSL connections. Possible values: ["Plain", "Login", "Xoauth2"]
smtp_auth_mechanism: String, true, option;
},
}
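To make the macro concrete: for a single `def` entry such as `domain`, `make_config!` generates roughly an optional builder field plus an unwrapping build step. A sketch trimmed to one field (the real expansion also emits the locking getter on Config and the admin-panel JSON):

// Rough single-field expansion of make_config! for:
//   domain: String, true, def, "http://localhost".to_string();
#[derive(Default)]
pub struct ConfigBuilder {
    domain: Option<String>,
}

#[derive(Default)]
pub struct ConfigItems {
    pub domain: String,
}

impl ConfigBuilder {
    fn build(&self) -> ConfigItems {
        ConfigItems {
            // `def` fills in the declared default when the value is unset
            domain: self.domain.clone().unwrap_or("http://localhost".to_string()),
        }
    }
}

fn main() {
    let items = ConfigBuilder::default().build();
    assert_eq!(items.domain, "http://localhost");
}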
|
||||
|
||||
fn validate_config(cfg: &ConfigItems) -> Result<(), Error> {
|
||||
if let Some(ref token) = cfg.admin_token {
|
||||
if token.trim().is_empty() {
|
||||
err!("`ADMIN_TOKEN` is enabled but has an empty value. To enable the admin page without token, use `DISABLE_ADMIN_TOKEN`")
|
||||
}
|
||||
}
|
||||
|
||||
if (cfg.duo_host.is_some() || cfg.duo_ikey.is_some() || cfg.duo_skey.is_some())
|
||||
&& !(cfg.duo_host.is_some() && cfg.duo_ikey.is_some() && cfg.duo_skey.is_some())
|
||||
{
|
||||
err!("All Duo options need to be set for global Duo support")
|
||||
}
|
||||
|
||||
if cfg.yubico_client_id.is_some() != cfg.yubico_secret_key.is_some() {
|
||||
err!("Both `YUBICO_CLIENT_ID` and `YUBICO_SECRET_KEY` need to be set for Yubikey OTP support")
|
||||
}
|
||||
|
||||
if cfg.smtp_host.is_some() == cfg.smtp_from.is_empty() {
|
||||
err!("Both `SMTP_HOST` and `SMTP_FROM` need to be set for email support")
|
||||
}
|
||||
|
||||
if cfg.smtp_username.is_some() != cfg.smtp_password.is_some() {
|
||||
err!("Both `SMTP_USERNAME` and `SMTP_PASSWORD` need to be set to enable email authentication")
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
impl Config {
|
||||
pub fn load() -> Result<Self, Error> {
|
||||
// Loading from env and file
|
||||
let _env = ConfigBuilder::from_env();
|
||||
let _usr = ConfigBuilder::from_file(&CONFIG_FILE).unwrap_or_default();
|
||||
|
||||
// Create merged config, config file overwrites env
|
||||
let builder = _env.merge(&_usr, true);
|
||||
|
||||
// Fill any missing with defaults
|
||||
let config = builder.build();
|
||||
validate_config(&config)?;
|
||||
|
||||
Ok(Config {
|
||||
inner: RwLock::new(Inner {
|
||||
templates: load_templates(&config.templates_folder),
|
||||
config,
|
||||
_env,
|
||||
_usr,
|
||||
}),
|
||||
})
|
||||
}
|
||||
|
||||
    pub fn update_config(&self, other: ConfigBuilder) -> Result<(), Error> {
        // Remove default values
        //let builder = other.remove(&self.inner.read().unwrap()._env);

        // TODO: Remove values that are defaults, above only checks those set by env and not the defaults
        let builder = other;

        // Serialize now before we consume the builder
        let config_str = serde_json::to_string_pretty(&builder)?;

        // Prepare the combined config
        let config = {
            let env = &self.inner.read().unwrap()._env;
            env.merge(&builder, false).build()
        };
        validate_config(&config)?;

        // Save both the user and the combined config
        {
            let mut writer = self.inner.write().unwrap();
            writer.config = config;
            writer._usr = builder;
        }

        // Save to file
        use std::{fs::File, io::Write};
        let mut file = File::create(&*CONFIG_FILE)?;
        file.write_all(config_str.as_bytes())?;

        Ok(())
    }

    pub fn update_config_partial(&self, other: ConfigBuilder) -> Result<(), Error> {
        let builder = {
            let usr = &self.inner.read().unwrap()._usr;
            usr.merge(&other, false)
        };
        self.update_config(builder)
    }

    pub fn delete_user_config(&self) -> Result<(), Error> {
        crate::util::delete_file(&CONFIG_FILE)?;

        // Empty user config
        let usr = ConfigBuilder::default();

        // Config now is env + defaults
        let config = {
            let env = &self.inner.read().unwrap()._env;
            env.build()
        };

        // Save configs
        {
            let mut writer = self.inner.write().unwrap();
            writer.config = config;
            writer._usr = usr;
        }

        Ok(())
    }

    pub fn private_rsa_key(&self) -> String {
        format!("{}.der", CONFIG.rsa_key_filename())
    }
    pub fn private_rsa_key_pem(&self) -> String {
        format!("{}.pem", CONFIG.rsa_key_filename())
    }
    pub fn public_rsa_key(&self) -> String {
        format!("{}.pub.der", CONFIG.rsa_key_filename())
    }
    pub fn mail_enabled(&self) -> bool {
        let inner = &self.inner.read().unwrap().config;
        inner._enable_smtp && inner.smtp_host.is_some()
    }

    pub fn get_duo_akey(&self) -> String {
        if let Some(akey) = self._duo_akey() {
            akey
        } else {
            let akey = crate::crypto::get_random_64();
            let akey_s = data_encoding::BASE64.encode(&akey);

            // Save the new value
            let mut builder = ConfigBuilder::default();
            builder._duo_akey = Some(akey_s.clone());
            self.update_config_partial(builder).ok();

            akey_s
        }
    }

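`get_duo_akey` is a lazily-initialized, persisted secret: generate 64 random bytes, BASE64-encode them, and store the result through the normal partial-update path so later calls return the same value. The same pattern in isolation (helper name and `stored` parameter are illustrative):

    // Generate-once-then-persist, as in get_duo_akey() above.
    fn get_or_create_secret(stored: Option<String>) -> String {
        match stored {
            Some(s) => s,
            None => {
                let bytes = crate::crypto::get_random_64();
                // ...the caller would persist this before returning it.
                data_encoding::BASE64.encode(&bytes)
            }
        }
    }
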
    pub fn render_template<T: serde::ser::Serialize>(
        &self,
        name: &str,
        data: &T,
    ) -> Result<String, crate::error::Error> {
        if CONFIG.reload_templates() {
            warn!("RELOADING TEMPLATES");
            let hb = load_templates(CONFIG.templates_folder().as_ref());
            hb.render(name, data).map_err(Into::into)
        } else {
            let hb = &CONFIG.inner.read().unwrap().templates;
            hb.render(name, data).map_err(Into::into)
        }
    }
}

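Callers pass a template's registered name plus any serializable data. A usage sketch with hypothetical data:

    let body = CONFIG.render_template(
        "email/invite_accepted.html",
        &serde_json::json!({ "url": "https://vault.example.com", "org_name": "Example Org" }),
    )?;
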
use handlebars::{
    Context, Handlebars, Helper, HelperDef, HelperResult, Output, RenderContext, RenderError, Renderable,
};

fn load_templates(path: &str) -> Handlebars {
    let mut hb = Handlebars::new();
    // Error on missing params
    hb.set_strict_mode(true);
    // Register helpers
    hb.register_helper("case", Box::new(CaseHelper));
    hb.register_helper("jsesc", Box::new(JsEscapeHelper));

    macro_rules! reg {
        ($name:expr) => {{
            let template = include_str!(concat!("static/templates/", $name, ".hbs"));
            hb.register_template_string($name, template).unwrap();
        }};
        ($name:expr, $ext:expr) => {{
            reg!($name);
            reg!(concat!($name, $ext));
        }};
    }

    // First register default templates here
    reg!("email/invite_accepted", ".html");
    reg!("email/invite_confirmed", ".html");
    reg!("email/new_device_logged_in", ".html");
    reg!("email/pw_hint_none", ".html");
    reg!("email/pw_hint_some", ".html");
    reg!("email/send_org_invite", ".html");

    reg!("admin/base");
    reg!("admin/login");
    reg!("admin/page");

    // And then load user templates to overwrite the defaults
    // Use .hbs extension for the files
    // Templates get registered with their relative name
    hb.register_templates_directory(".hbs", path).unwrap();

    hb
}

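For reference, the two-argument form of `reg!` registers both the plain and the suffixed variant, each compiled into the binary via `include_str!`. Expanding one invocation by hand:

    // What reg!("email/invite_accepted", ".html") amounts to:
    let t = include_str!("static/templates/email/invite_accepted.hbs");
    hb.register_template_string("email/invite_accepted", t).unwrap();
    let t = include_str!("static/templates/email/invite_accepted.html.hbs");
    hb.register_template_string("email/invite_accepted.html", t).unwrap();

Dropping a `.hbs` file with the same relative name into the templates folder then overrides the built-in default, since the directory is registered last.
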
pub struct CaseHelper;

impl HelperDef for CaseHelper {
    fn call<'reg: 'rc, 'rc>(
        &self,
        h: &Helper<'reg, 'rc>,
        r: &'reg Handlebars,
        ctx: &Context,
        rc: &mut RenderContext<'reg>,
        out: &mut dyn Output,
    ) -> HelperResult {
        let param = h
            .param(0)
            .ok_or_else(|| RenderError::new("Param not found for helper \"case\""))?;
        let value = param.value().clone();

        if h.params().iter().skip(1).any(|x| x.value() == &value) {
            h.template().map(|t| t.render(r, ctx, rc, out)).unwrap_or(Ok(()))
        } else {
            Ok(())
        }
    }
}

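`case` renders its block only when the first parameter equals one of the remaining ones. A usage sketch (template source and data are illustrative):

    // Rendering a template that uses the `case` block helper:
    let source = "{{#case status 1 2}}active{{/case}}";
    hb.register_template_string("demo", source).unwrap();
    let out = hb.render("demo", &serde_json::json!({ "status": 2 })).unwrap();
    assert_eq!(out, "active"); // status matched one of the listed values
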
pub struct JsEscapeHelper;

impl HelperDef for JsEscapeHelper {
    fn call<'reg: 'rc, 'rc>(
        &self,
        h: &Helper<'reg, 'rc>,
        _: &'reg Handlebars,
        _: &Context,
        _: &mut RenderContext<'reg>,
        out: &mut dyn Output,
    ) -> HelperResult {
        let param = h
            .param(0)
            .ok_or_else(|| RenderError::new("Param not found for helper \"js_escape\""))?;

        let value = param
            .value()
            .as_str()
            .ok_or_else(|| RenderError::new("Param for helper \"js_escape\" is not a String"))?;

        let escaped_value = value.replace('\\', "").replace('\'', "\\x22").replace('\"', "\\x27");
        let quoted_value = format!("&quot;{}&quot;", escaped_value);

        out.write(&quoted_value)?;
        Ok(())
    }
}
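`jsesc` strips backslashes, rewrites the quote characters as `\xNN` escapes, and wraps the result in HTML-entity quotes so a value can sit inside an inline script string. Tracing one illustrative input through the helper:

    // "O'Brien"  ->  escape step: "O\x22Brien"
    //            ->  quoting step: &quot;O\x22Brien&quot;
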
@@ -2,7 +2,8 @@
// PBKDF2 derivation
//

use ring::{digest, pbkdf2};
use ring::{digest, hmac, pbkdf2};
use std::num::NonZeroU32;

static DIGEST_ALG: &digest::Algorithm = &digest::SHA256;
const OUTPUT_LEN: usize = digest::SHA256_OUTPUT_LEN;
@@ -10,15 +11,29 @@ const OUTPUT_LEN: usize = digest::SHA256_OUTPUT_LEN;
pub fn hash_password(secret: &[u8], salt: &[u8], iterations: u32) -> Vec<u8> {
    let mut out = vec![0u8; OUTPUT_LEN]; // Initialize array with zeros

    let iterations = NonZeroU32::new(iterations).expect("Iterations can't be zero");
    pbkdf2::derive(DIGEST_ALG, iterations, salt, secret, &mut out);

    out
}

pub fn verify_password_hash(secret: &[u8], salt: &[u8], previous: &[u8], iterations: u32) -> bool {
    let iterations = NonZeroU32::new(iterations).expect("Iterations can't be zero");
    pbkdf2::verify(DIGEST_ALG, iterations, salt, secret, previous).is_ok()
}

//
// HMAC
//
pub fn hmac_sign(key: &str, data: &str) -> String {
    use data_encoding::HEXLOWER;

    let key = hmac::SigningKey::new(&digest::SHA1, key.as_bytes());
    let signature = hmac::sign(&key, data.as_bytes());

    HEXLOWER.encode(signature.as_ref())
}

//
// Random values
//
@@ -36,3 +51,12 @@ pub fn get_random(mut array: Vec<u8>) -> Vec<u8> {

    array
}

//
// Constant time compare
//
pub fn ct_eq<T: AsRef<[u8]>, U: AsRef<[u8]>>(a: T, b: U) -> bool {
    use ring::constant_time::verify_slices_are_equal;

    verify_slices_are_equal(a.as_ref(), b.as_ref()).is_ok()
}

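Putting the crypto pieces together: hash on registration, verify on login, and use the constant-time compare for other secrets such as tokens or recovery codes. A sketch with hypothetical inputs:

    // PBKDF2-SHA256 derivation and verification, plus a timing-safe compare.
    let hash = hash_password(b"master-password", b"per-user-salt", 100_000);
    assert!(verify_password_hash(b"master-password", b"per-user-salt", &hash, 100_000));
    assert!(ct_eq("abc123", "abc123")); // no early exit on mismatching prefixes
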
@@ -2,36 +2,62 @@ use std::ops::Deref;
use diesel::r2d2;
use diesel::r2d2::ConnectionManager;
use diesel::sqlite::SqliteConnection;
use diesel::{Connection as DieselConnection, ConnectionError};

use rocket::http::Status;
use rocket::request::{self, FromRequest};
use rocket::{Outcome, Request, State};

use crate::error::Error;
use chrono::prelude::*;
use std::process::Command;

use crate::CONFIG;

/// An alias to the database connection used
type Connection = SqliteConnection;
#[cfg(feature = "sqlite")]
type Connection = diesel::sqlite::SqliteConnection;
#[cfg(feature = "mysql")]
type Connection = diesel::mysql::MysqlConnection;

/// An alias to the type for a pool of Diesel SQLite connections.
/// An alias to the type for a pool of Diesel connections.
type Pool = r2d2::Pool<ConnectionManager<Connection>>;

/// Connection request guard type: a wrapper around an r2d2 pooled connection.
pub struct DbConn(pub r2d2::PooledConnection<ConnectionManager<Connection>>);

pub mod models;
#[cfg(feature = "sqlite")]
#[path = "schemas/sqlite/schema.rs"]
pub mod schema;
#[cfg(feature = "mysql")]
#[path = "schemas/mysql/schema.rs"]
pub mod schema;

/// Initializes a database pool.
pub fn init_pool() -> Pool {
    let manager = ConnectionManager::new(&*CONFIG.database_url);
    let manager = ConnectionManager::new(CONFIG.database_url());

    r2d2::Pool::builder().build(manager).expect("Failed to create pool")
}

pub fn get_connection() -> Result<Connection, ConnectionError> {
    Connection::establish(&CONFIG.database_url)
    Connection::establish(&CONFIG.database_url())
}

/// Creates a back-up of the database using sqlite3
pub fn backup_database() -> Result<(), Error> {
    let now: DateTime<Utc> = Utc::now();
    let file_date = now.format("%Y%m%d").to_string();
    let backup_command: String = format!("{}{}{}", ".backup 'db_", file_date, ".sqlite3'");

    Command::new("sqlite3")
        .current_dir("./data")
        .args(&["db.sqlite3", &backup_command])
        .output()
        .expect("Can't open database, sqlite3 is not available, make sure it's installed and available on the PATH");

    Ok(())
}

/// Attempts to retrieve a single connection from the managed database pool. If
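The hunk above is cut mid-comment by the diff. For reference, the backup spawned by `backup_database` amounts to running the sqlite3 CLI against the data folder; schematically (the date is illustrative):

    // Equivalent shell invocation, run with the working directory set to ./data:
    //     sqlite3 db.sqlite3 ".backup 'db_20190101.sqlite3'"
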
@@ -12,7 +12,7 @@ pub struct Attachment {
pub cipher_uuid: String,
pub file_name: String,
pub file_size: i32,
pub key: Option<String>,
pub akey: Option<String>,
}

/// Local methods
@@ -23,12 +23,12 @@ impl Attachment {
    cipher_uuid,
    file_name,
    file_size,
    key: None,
    akey: None,
    }
}

pub fn get_file_path(&self) -> String {
    format!("{}/{}/{}", CONFIG.attachments_folder, self.cipher_uuid, self.id)
    format!("{}/{}/{}", CONFIG.attachments_folder(), self.cipher_uuid, self.id)
}

pub fn to_json(&self, host: &str) -> Value {
@@ -43,7 +43,7 @@ impl Attachment {
    "FileName": self.file_name,
    "Size": self.file_size.to_string(),
    "SizeName": display_size,
    "Key": self.key,
    "Key": self.akey,
    "Object": "attachment"
    })
}
@@ -85,6 +85,8 @@ impl Attachment {
}

pub fn find_by_id(id: &str, conn: &DbConn) -> Option<Self> {
    let id = id.to_lowercase();

    attachments::table
        .filter(attachments::id.eq(id))
        .first::<Self>(&**conn)
@@ -24,7 +24,7 @@ pub struct Cipher {
    Card = 3,
    Identity = 4
    */
    pub type_: i32,
    pub atype: i32,
    pub name: String,
    pub notes: Option<String>,
    pub fields: Option<String>,
@@ -37,7 +37,7 @@ pub struct Cipher {

/// Local methods
impl Cipher {
    pub fn new(type_: i32, name: String) -> Self {
    pub fn new(atype: i32, name: String) -> Self {
        let now = Utc::now().naive_utc();

        Self {
@@ -48,7 +48,7 @@ impl Cipher {
            user_uuid: None,
            organization_uuid: None,

            type_,
            atype,
            favorite: false,
            name,

@@ -72,31 +72,20 @@ use crate::error::MapResult;
/// Database methods
impl Cipher {
    pub fn to_json(&self, host: &str, user_uuid: &str, conn: &DbConn) -> Value {
        use super::Attachment;
        use crate::util::format_date;
        use serde_json;

        let attachments = Attachment::find_by_cipher(&self.uuid, conn);
        let attachments_json: Vec<Value> = attachments.iter().map(|c| c.to_json(host)).collect();

        let fields_json: Value = if let Some(ref fields) = self.fields {
            serde_json::from_str(fields).unwrap()
        } else {
            Value::Null
        };
        let fields_json = self.fields.as_ref().and_then(|s| serde_json::from_str(s).ok()).unwrap_or(Value::Null);
        let password_history_json = self.password_history.as_ref().and_then(|s| serde_json::from_str(s).ok()).unwrap_or(Value::Null);

        let password_history_json: Value = if let Some(ref password_history) = self.password_history {
            serde_json::from_str(password_history).unwrap()
        } else {
            Value::Null
        };

        let mut data_json: Value = serde_json::from_str(&self.data).unwrap();
        let mut data_json: Value = serde_json::from_str(&self.data).unwrap_or(Value::Null);

        // TODO: ******* Backwards compat start **********
        // To remove backwards compatibility, just remove this entire section
        // and remove the compat code from ciphers::update_cipher_from_data
        if self.type_ == 1 && data_json["Uris"].is_array() {
        if self.atype == 1 && data_json["Uris"].is_array() {
            let uri = data_json["Uris"][0]["Uri"].clone();
            data_json["Uri"] = uri;
        }
@@ -104,7 +93,7 @@ impl Cipher {

        let mut json_object = json!({
            "Id": self.uuid,
            "Type": self.type_,
            "Type": self.atype,
            "RevisionDate": format_date(&self.updated_at),
            "FolderId": self.get_folder_uuid(&user_uuid, &conn),
            "Favorite": self.favorite,
@@ -125,7 +114,7 @@ impl Cipher {
            "PasswordHistory": password_history_json,
        });

        let key = match self.type_ {
        let key = match self.atype {
            1 => "Login",
            2 => "SecureNote",
            3 => "Card",
@@ -196,40 +185,28 @@ impl Cipher {
    }

    pub fn move_to_folder(&self, folder_uuid: Option<String>, user_uuid: &str, conn: &DbConn) -> EmptyResult {
        match self.get_folder_uuid(&user_uuid, &conn) {
            None => {
                match folder_uuid {
                    Some(new_folder) => {
                        self.update_users_revision(conn);
                        let folder_cipher = FolderCipher::new(&new_folder, &self.uuid);
                        folder_cipher.save(&conn)
                    }
                    None => Ok(()), //nothing to do
                }
            }
            Some(current_folder) => {
                match folder_uuid {
                    Some(new_folder) => {
                        if current_folder == new_folder {
                            Ok(()) //nothing to do
                        } else {
                            self.update_users_revision(conn);
                            if let Some(current_folder) =
                                FolderCipher::find_by_folder_and_cipher(&current_folder, &self.uuid, &conn)
                            {
                                current_folder.delete(&conn)?;
                            }
                            FolderCipher::new(&new_folder, &self.uuid).save(&conn)
                        }
                    }
                    None => {
                        self.update_users_revision(conn);
                        match FolderCipher::find_by_folder_and_cipher(&current_folder, &self.uuid, &conn) {
                            Some(current_folder) => current_folder.delete(&conn),
                            None => err!("Couldn't move from previous folder"),
                        }
                    }
        User::update_uuid_revision(user_uuid, &conn);

        match (self.get_folder_uuid(&user_uuid, &conn), folder_uuid) {
            // No changes
            (None, None) => Ok(()),
            (Some(ref old), Some(ref new)) if old == new => Ok(()),

            // Add to folder
            (None, Some(new)) => FolderCipher::new(&new, &self.uuid).save(&conn),

            // Remove from folder
            (Some(old), None) => match FolderCipher::find_by_folder_and_cipher(&old, &self.uuid, &conn) {
                Some(old) => old.delete(&conn),
                None => err!("Couldn't move from previous folder"),
            },

            // Move to another folder
            (Some(old), Some(new)) => {
                if let Some(old) = FolderCipher::find_by_folder_and_cipher(&old, &self.uuid, &conn) {
                    old.delete(&conn)?;
                }
                FolderCipher::new(&new, &self.uuid).save(&conn)
            }
        }
    }
@@ -251,7 +228,7 @@ impl Cipher {
            // Cipher owner
            users_organizations::access_all.eq(true).or(
                // access_all in Organization
                users_organizations::type_.le(UserOrgType::Admin as i32).or(
                users_organizations::atype.le(UserOrgType::Admin as i32).or(
                    // Org admin or owner
                    users_collections::user_uuid.eq(user_uuid).and(
                        users_collections::read_only.eq(false), //R/W access to collection
@@ -282,7 +259,7 @@ impl Cipher {
            // Cipher owner
            users_organizations::access_all.eq(true).or(
                // access_all in Organization
                users_organizations::type_.le(UserOrgType::Admin as i32).or(
                users_organizations::atype.le(UserOrgType::Admin as i32).or(
                    // Org admin or owner
                    users_collections::user_uuid.eq(user_uuid), // Access to Collection
                ),
@@ -329,7 +306,7 @@ impl Cipher {
            ))
            .filter(ciphers::user_uuid.eq(user_uuid).or( // Cipher owner
                users_organizations::access_all.eq(true).or( // access_all in Organization
                    users_organizations::type_.le(UserOrgType::Admin as i32).or( // Org admin or owner
                    users_organizations::atype.le(UserOrgType::Admin as i32).or( // Org admin or owner
                        users_collections::user_uuid.eq(user_uuid).and( // Access to Collection
                            users_organizations::status.eq(UserOrgStatus::Confirmed as i32)
                        )
@@ -379,7 +356,7 @@ impl Cipher {
            .filter(ciphers_collections::cipher_uuid.eq(&self.uuid))
            .filter(users_collections::user_uuid.eq(user_id).or( // User has access to collection
                users_organizations::access_all.eq(true).or( // User has access all
                    users_organizations::type_.le(UserOrgType::Admin as i32) // User is admin or owner
                    users_organizations::atype.le(UserOrgType::Admin as i32) // User is admin or owner
                )
            ))
            .select(ciphers_collections::collection_uuid)
@@ -43,21 +43,17 @@ use crate::error::MapResult;
/// Database methods
impl Collection {
    pub fn save(&mut self, conn: &DbConn) -> EmptyResult {
        // Update affected users revision
        UserOrganization::find_by_collection_and_org(&self.uuid, &self.org_uuid, conn)
            .iter()
            .for_each(|user_org| {
                User::update_uuid_revision(&user_org.user_uuid, conn);
            });
    pub fn save(&self, conn: &DbConn) -> EmptyResult {
        self.update_users_revision(conn);

        diesel::replace_into(collections::table)
            .values(&*self)
            .values(self)
            .execute(&**conn)
            .map_res("Error saving collection")
    }

    pub fn delete(self, conn: &DbConn) -> EmptyResult {
        self.update_users_revision(conn);
        CollectionCipher::delete_all_by_collection(&self.uuid, &conn)?;
        CollectionUser::delete_all_by_collection(&self.uuid, &conn)?;

@@ -73,6 +69,14 @@ impl Collection {
        Ok(())
    }

    pub fn update_users_revision(&self, conn: &DbConn) {
        UserOrganization::find_by_collection_and_org(&self.uuid, &self.org_uuid, conn)
            .iter()
            .for_each(|user_org| {
                User::update_uuid_revision(&user_org.user_uuid, conn);
            });
    }

    pub fn find_by_uuid(uuid: &str, conn: &DbConn) -> Option<Self> {
        collections::table
            .filter(collections::uuid.eq(uuid))
@@ -142,7 +146,7 @@ impl Collection {
            .filter(
                users_collections::collection_uuid.eq(uuid).or( // Directly accessed collection
                    users_organizations::access_all.eq(true).or( // access_all in Organization
                        users_organizations::type_.le(UserOrgType::Admin as i32) // Org admin or owner
                        users_organizations::atype.le(UserOrgType::Admin as i32) // Org admin or owner
                    )
                )
            ).select(collections::all_columns)
@@ -241,7 +245,9 @@ impl CollectionUser {
    pub fn delete_all_by_collection(collection_uuid: &str, conn: &DbConn) -> EmptyResult {
        CollectionUser::find_by_collection(&collection_uuid, conn)
            .iter()
            .for_each(|collection| User::update_uuid_revision(&collection.user_uuid, conn));
            .for_each(|collection| {
                User::update_uuid_revision(&collection.user_uuid, conn);
            });

        diesel::delete(users_collections::table.filter(users_collections::collection_uuid.eq(collection_uuid)))
            .execute(&**conn)
@@ -272,6 +278,7 @@ pub struct CollectionCipher {
/// Database methods
impl CollectionCipher {
    pub fn save(cipher_uuid: &str, collection_uuid: &str, conn: &DbConn) -> EmptyResult {
        Self::update_users_revision(&collection_uuid, conn);
        diesel::replace_into(ciphers_collections::table)
            .values((
                ciphers_collections::cipher_uuid.eq(cipher_uuid),
@@ -282,6 +289,7 @@ impl CollectionCipher {
    }

    pub fn delete(cipher_uuid: &str, collection_uuid: &str, conn: &DbConn) -> EmptyResult {
        Self::update_users_revision(&collection_uuid, conn);
        diesel::delete(
            ciphers_collections::table
                .filter(ciphers_collections::cipher_uuid.eq(cipher_uuid))
@@ -302,4 +310,10 @@ impl CollectionCipher {
            .execute(&**conn)
            .map_res("Error removing ciphers from collection")
    }

    pub fn update_users_revision(collection_uuid: &str, conn: &DbConn) {
        if let Some(collection) = Collection::find_by_uuid(collection_uuid, conn) {
            collection.update_users_revision(conn);
        }
    }
}
@@ -15,7 +15,7 @@ pub struct Device {
pub name: String,
/// https://github.com/bitwarden/core/tree/master/src/Core/Enums
pub type_: i32,
pub atype: i32,
pub push_token: Option<String>,

pub refresh_token: String,
@@ -25,7 +25,7 @@ pub struct Device {

/// Local methods
impl Device {
    pub fn new(uuid: String, user_uuid: String, name: String, type_: i32) -> Self {
    pub fn new(uuid: String, user_uuid: String, name: String, atype: i32) -> Self {
        let now = Utc::now().naive_utc();

        Self {
@@ -35,7 +35,7 @@ impl Device {

            user_uuid,
            name,
            type_,
            atype,

            push_token: None,
            refresh_token: String::new(),
@@ -70,18 +70,18 @@ impl Device {
        let time_now = Utc::now().naive_utc();
        self.updated_at = time_now;

        let orgowner: Vec<_> = orgs.iter().filter(|o| o.type_ == 0).map(|o| o.org_uuid.clone()).collect();
        let orgadmin: Vec<_> = orgs.iter().filter(|o| o.type_ == 1).map(|o| o.org_uuid.clone()).collect();
        let orguser: Vec<_> = orgs.iter().filter(|o| o.type_ == 2).map(|o| o.org_uuid.clone()).collect();
        let orgmanager: Vec<_> = orgs.iter().filter(|o| o.type_ == 3).map(|o| o.org_uuid.clone()).collect();
        let orgowner: Vec<_> = orgs.iter().filter(|o| o.atype == 0).map(|o| o.org_uuid.clone()).collect();
        let orgadmin: Vec<_> = orgs.iter().filter(|o| o.atype == 1).map(|o| o.org_uuid.clone()).collect();
        let orguser: Vec<_> = orgs.iter().filter(|o| o.atype == 2).map(|o| o.org_uuid.clone()).collect();
        let orgmanager: Vec<_> = orgs.iter().filter(|o| o.atype == 3).map(|o| o.org_uuid.clone()).collect();

        // Create the JWT claims struct, to send to the client
        use crate::auth::{encode_jwt, JWTClaims, DEFAULT_VALIDITY, JWT_ISSUER};
        let claims = JWTClaims {
        use crate::auth::{encode_jwt, LoginJWTClaims, DEFAULT_VALIDITY, JWT_LOGIN_ISSUER};
        let claims = LoginJWTClaims {
            nbf: time_now.timestamp(),
            exp: (time_now + *DEFAULT_VALIDITY).timestamp(),
            iss: JWT_ISSUER.to_string(),
            iss: JWT_LOGIN_ISSUER.to_string(),
            sub: user.uuid.to_string(),

            premium: true,
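The renamed claims type is then signed into the access token the client receives; schematically (a sketch, field set abridged):

    // nbf/exp bound the token's validity window: not valid before `time_now`,
    // expiring DEFAULT_VALIDITY later. The signed string goes back to the client.
    let access_token = encode_jwt(&claims);
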
@@ -21,9 +21,9 @@ pub struct UserOrganization {
pub org_uuid: String,

pub access_all: bool,
pub key: String,
pub akey: String,
pub status: i32,
pub type_: i32,
pub atype: i32,
}

pub enum UserOrgStatus {
@@ -196,9 +196,9 @@ impl UserOrganization {
    org_uuid,

    access_all: false,
    key: String::new(),
    akey: String::new(),
    status: UserOrgStatus::Accepted as i32,
    type_: UserOrgType::User as i32,
    atype: UserOrgType::User as i32,
    }
}
}
@@ -213,7 +213,7 @@ use crate::error::MapResult;

/// Database methods
impl Organization {
    pub fn save(&mut self, conn: &DbConn) -> EmptyResult {
    pub fn save(&self, conn: &DbConn) -> EmptyResult {
        UserOrganization::find_by_org(&self.uuid, conn)
            .iter()
            .for_each(|user_org| {
@@ -221,7 +221,7 @@ impl Organization {
            });

        diesel::replace_into(organizations::table)
            .values(&*self)
            .values(self)
            .execute(&**conn)
            .map_res("Error saving organization")
    }
@@ -266,9 +266,9 @@ impl UserOrganization {
    "MaxStorageGb": 10, // The value doesn't matter, we don't check server-side

    // These are per user
    "Key": self.key,
    "Key": self.akey,
    "Status": self.status,
    "Type": self.type_,
    "Type": self.atype,
    "Enabled": true,

    "Object": "profileOrganization",
@@ -285,25 +285,17 @@ impl UserOrganization {
    "Email": user.email,

    "Status": self.status,
    "Type": self.type_,
    "Type": self.atype,
    "AccessAll": self.access_all,

    "Object": "organizationUserUserDetails",
    })
}

pub fn to_json_collection_user_details(&self, read_only: bool, conn: &DbConn) -> Value {
    let user = User::find_by_uuid(&self.user_uuid, conn).unwrap();

pub fn to_json_collection_user_details(&self, read_only: bool) -> Value {
    json!({
        "OrganizationUserId": self.uuid,
        "AccessAll": self.access_all,
        "Name": user.name,
        "Email": user.email,
        "Type": self.type_,
        "Status": self.status,
        "ReadOnly": read_only,
        "Object": "collectionUser",
        "Id": self.uuid,
        "ReadOnly": read_only
    })
}

@@ -323,7 +315,7 @@ impl UserOrganization {
    "UserId": self.user_uuid,

    "Status": self.status,
    "Type": self.type_,
    "Type": self.atype,
    "AccessAll": self.access_all,
    "Collections": coll_uuids,

@@ -331,11 +323,11 @@ impl UserOrganization {
    })
}

pub fn save(&mut self, conn: &DbConn) -> EmptyResult {
pub fn save(&self, conn: &DbConn) -> EmptyResult {
    User::update_uuid_revision(&self.user_uuid, conn);

    diesel::replace_into(users_organizations::table)
        .values(&*self)
        .values(self)
        .execute(&**conn)
        .map_res("Error adding user to organization")
}
@@ -365,7 +357,7 @@ impl UserOrganization {
}

pub fn has_full_access(self) -> bool {
    self.access_all || self.type_ >= UserOrgType::Admin
    self.access_all || self.atype >= UserOrgType::Admin
}

pub fn find_by_uuid(uuid: &str, conn: &DbConn) -> Option<Self> {
@@ -413,10 +405,10 @@ impl UserOrganization {
    .expect("Error loading user organizations")
}

pub fn find_by_org_and_type(org_uuid: &str, type_: i32, conn: &DbConn) -> Vec<Self> {
pub fn find_by_org_and_type(org_uuid: &str, atype: i32, conn: &DbConn) -> Vec<Self> {
    users_organizations::table
        .filter(users_organizations::org_uuid.eq(org_uuid))
        .filter(users_organizations::type_.eq(type_))
        .filter(users_organizations::atype.eq(atype))
        .load::<Self>(&**conn)
        .expect("Error loading user organizations")
}
@@ -9,13 +9,13 @@ use super::User;
pub struct TwoFactor {
    pub uuid: String,
    pub user_uuid: String,
    pub type_: i32,
    pub atype: i32,
    pub enabled: bool,
    pub data: String,
}

#[allow(dead_code)]
#[derive(FromPrimitive, ToPrimitive)]
#[derive(FromPrimitive)]
pub enum TwoFactorType {
    Authenticator = 0,
    Email = 1,
@@ -32,31 +32,16 @@ pub enum TwoFactorType {

/// Local methods
impl TwoFactor {
    pub fn new(user_uuid: String, type_: TwoFactorType, data: String) -> Self {
    pub fn new(user_uuid: String, atype: TwoFactorType, data: String) -> Self {
        Self {
            uuid: crate::util::get_uuid(),
            user_uuid,
            type_: type_ as i32,
            atype: atype as i32,
            enabled: true,
            data,
        }
    }

    pub fn check_totp_code(&self, totp_code: u64) -> bool {
        let totp_secret = self.data.as_bytes();

        use data_encoding::BASE32;
        use oath::{totp_raw_now, HashType};

        let decoded_secret = match BASE32.decode(totp_secret) {
            Ok(s) => s,
            Err(_) => return false,
        };

        let generated = totp_raw_now(&decoded_secret, 6, 0, 30, &HashType::SHA1);
        generated == totp_code
    }

    pub fn to_json(&self) -> Value {
        json!({
            "Enabled": self.enabled,
@@ -68,7 +53,7 @@ impl TwoFactor {
    pub fn to_json_list(&self) -> Value {
        json!({
            "Enabled": self.enabled,
            "Type": self.type_,
            "Type": self.atype,
            "Object": "twoFactorProvider"
        })
    }
@@ -100,14 +85,15 @@ impl TwoFactor {
    pub fn find_by_user(user_uuid: &str, conn: &DbConn) -> Vec<Self> {
        twofactor::table
            .filter(twofactor::user_uuid.eq(user_uuid))
            .filter(twofactor::atype.lt(1000)) // Filter implementation types
            .load::<Self>(&**conn)
            .expect("Error loading twofactor")
    }

    pub fn find_by_user_and_type(user_uuid: &str, type_: i32, conn: &DbConn) -> Option<Self> {
    pub fn find_by_user_and_type(user_uuid: &str, atype: i32, conn: &DbConn) -> Option<Self> {
        twofactor::table
            .filter(twofactor::user_uuid.eq(user_uuid))
            .filter(twofactor::type_.eq(type_))
            .filter(twofactor::atype.eq(atype))
            .first::<Self>(&**conn)
            .ok()
    }
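For the `check_totp_code` helper shown in the removed lines above: a TOTP record stores the BASE32 secret in `data`, and checking a code decodes that secret and compares it against the value for the current 30-second window. A usage sketch with a hypothetical secret:

    let tf = TwoFactor::new(
        "user-uuid".to_string(),
        TwoFactorType::Authenticator,
        "JBSWY3DPEHPK3PXP".to_string(), // BASE32-encoded shared secret
    );
    // True only when `code` matches the current 30-second TOTP window.
    let ok = tf.check_totp_code(123_456);
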
@@ -20,7 +20,7 @@ pub struct User {
pub password_iterations: i32,
pub password_hint: Option<String>,

pub key: String,
pub akey: String,
pub private_key: Option<String>,
pub public_key: Option<String>,

@@ -37,6 +37,12 @@ pub struct User {
    pub client_kdf_iter: i32,
}

enum UserStatus {
    Enabled = 0,
    Invited = 1,
    _Disabled = 2,
}

/// Local methods
impl User {
    pub const CLIENT_KDF_TYPE_DEFAULT: i32 = 0; // PBKDF2: 0
@@ -52,11 +58,11 @@ impl User {
    updated_at: now,
    name: email.clone(),
    email,
    key: String::new(),
    akey: String::new(),

    password_hash: Vec::new(),
    salt: crypto::get_random_64(),
    password_iterations: CONFIG.password_iterations,
    password_iterations: CONFIG.password_iterations(),

    security_stamp: crate::util::get_uuid(),

@@ -86,7 +92,7 @@ impl User {

pub fn check_valid_recovery_code(&self, recovery_code: &str) -> bool {
    if let Some(ref totp_recover) = self.totp_recover {
        recovery_code == totp_recover.to_lowercase()
        crate::crypto::ct_eq(recovery_code, totp_recover.to_lowercase())
    } else {
        false
    }
@@ -113,13 +119,19 @@ use crate::error::MapResult;
/// Database methods
impl User {
    pub fn to_json(&self, conn: &DbConn) -> Value {
        use super::{TwoFactor, UserOrganization};

        let orgs = UserOrganization::find_by_user(&self.uuid, conn);
        let orgs_json: Vec<Value> = orgs.iter().map(|c| c.to_json(&conn)).collect();
        let twofactor_enabled = !TwoFactor::find_by_user(&self.uuid, conn).is_empty();

        // TODO: Might want to save the status field in the DB
        let status = if self.password_hash.is_empty() {
            UserStatus::Invited
        } else {
            UserStatus::Enabled
        };

        json!({
            "_Status": status as i32,
            "Id": self.uuid,
            "Name": self.name,
            "Email": self.email,
@@ -128,7 +140,7 @@ impl User {
            "MasterPasswordHint": self.password_hint,
            "Culture": "en-US",
            "TwoFactorEnabled": twofactor_enabled,
            "Key": self.key,
            "Key": self.akey,
            "PrivateKey": self.private_key,
            "SecurityStamp": self.security_stamp,
            "Organizations": orgs_json,
@@ -137,6 +149,10 @@ impl User {
    }

    pub fn save(&mut self, conn: &DbConn) -> EmptyResult {
        if self.email.trim().is_empty() {
            err!("User email can't be empty")
        }

        self.updated_at = Utc::now().naive_utc();

        diesel::replace_into(users::table) // Insert or update
@@ -147,7 +163,7 @@ impl User {

    pub fn delete(self, conn: &DbConn) -> EmptyResult {
        for user_org in UserOrganization::find_by_user(&self.uuid, &*conn) {
            if user_org.type_ == UserOrgType::Owner {
            if user_org.atype == UserOrgType::Owner {
                let owner_type = UserOrgType::Owner as i32;
                if UserOrganization::find_by_org_and_type(&user_org.org_uuid, owner_type, &conn).len() <= 1 {
                    err!("Can't delete last owner")
@@ -168,19 +184,41 @@ impl User {
    }

    pub fn update_uuid_revision(uuid: &str, conn: &DbConn) {
        if let Some(mut user) = User::find_by_uuid(&uuid, conn) {
            if user.update_revision(conn).is_err() {
                warn!("Failed to update revision for {}", user.email);
            };
        };
        if let Err(e) = Self::_update_revision(uuid, &Utc::now().naive_utc(), conn) {
            warn!("Failed to update revision for {}: {:#?}", uuid, e);
        }
    }

    pub fn update_all_revisions(conn: &DbConn) -> EmptyResult {
        let updated_at = Utc::now().naive_utc();

        crate::util::retry(
            || {
                diesel::update(users::table)
                    .set(users::updated_at.eq(updated_at))
                    .execute(&**conn)
            },
            10,
        )
        .map_res("Error updating revision date for all users")
    }

    pub fn update_revision(&mut self, conn: &DbConn) -> EmptyResult {
        self.updated_at = Utc::now().naive_utc();
        diesel::update(users::table.filter(users::uuid.eq(&self.uuid)))
            .set(users::updated_at.eq(&self.updated_at))
            .execute(&**conn)
            .map_res("Error updating user revision")

        Self::_update_revision(&self.uuid, &self.updated_at, conn)
    }

    fn _update_revision(uuid: &str, date: &NaiveDateTime, conn: &DbConn) -> EmptyResult {
        crate::util::retry(
            || {
                diesel::update(users::table.filter(users::uuid.eq(uuid)))
                    .set(users::updated_at.eq(date))
                    .execute(&**conn)
            },
            10,
        )
        .map_res("Error updating user revision")
    }

    pub fn find_by_mail(mail: &str, conn: &DbConn) -> Option<Self> {
@@ -212,9 +250,13 @@ impl Invitation {
    Self { email }
}

pub fn save(&mut self, conn: &DbConn) -> EmptyResult {
pub fn save(&self, conn: &DbConn) -> EmptyResult {
    if self.email.trim().is_empty() {
        err!("Invitation email can't be empty")
    }

    diesel::replace_into(invitations::table)
        .values(&*self)
        .values(self)
        .execute(&**conn)
        .map_res("Error saving invitation")
}
@@ -234,7 +276,7 @@ impl Invitation {
}

pub fn take(mail: &str, conn: &DbConn) -> bool {
    CONFIG.invitations_allowed
    CONFIG.invitations_allowed()
        && match Self::find_by_mail(mail, &conn) {
            Some(invitation) => invitation.delete(&conn).is_ok(),
            None => false,
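Both revision updates now funnel through `crate::util::retry`, which re-runs the Diesel update a bounded number of times (useful when SQLite returns transient lock errors). The util itself is not part of this diff; a minimal sketch of such a helper, under that assumption:

    // Hypothetical shape of crate::util::retry as used above: re-run `func`
    // up to `max_tries` times, returning the first success or the last error.
    fn retry<F, T, E>(func: F, max_tries: u32) -> Result<T, E>
    where
        F: Fn() -> Result<T, E>,
    {
        let mut tries = 0;
        loop {
            match func() {
                Ok(v) => return Ok(v),
                Err(e) => {
                    tries += 1;
                    if tries >= max_tries {
                        return Err(e);
                    }
                }
            }
        }
    }
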
172  src/db/schemas/mysql/schema.rs  Normal file
@@ -0,0 +1,172 @@
table! {
    attachments (id) {
        id -> Varchar,
        cipher_uuid -> Varchar,
        file_name -> Text,
        file_size -> Integer,
        akey -> Nullable<Text>,
    }
}

table! {
    ciphers (uuid) {
        uuid -> Varchar,
        created_at -> Datetime,
        updated_at -> Datetime,
        user_uuid -> Nullable<Varchar>,
        organization_uuid -> Nullable<Varchar>,
        atype -> Integer,
        name -> Text,
        notes -> Nullable<Text>,
        fields -> Nullable<Text>,
        data -> Text,
        favorite -> Bool,
        password_history -> Nullable<Text>,
    }
}

table! {
    ciphers_collections (cipher_uuid, collection_uuid) {
        cipher_uuid -> Varchar,
        collection_uuid -> Varchar,
    }
}

table! {
    collections (uuid) {
        uuid -> Varchar,
        org_uuid -> Varchar,
        name -> Text,
    }
}

table! {
    devices (uuid) {
        uuid -> Varchar,
        created_at -> Datetime,
        updated_at -> Datetime,
        user_uuid -> Varchar,
        name -> Text,
        atype -> Integer,
        push_token -> Nullable<Text>,
        refresh_token -> Text,
        twofactor_remember -> Nullable<Text>,
    }
}

table! {
    folders (uuid) {
        uuid -> Varchar,
        created_at -> Datetime,
        updated_at -> Datetime,
        user_uuid -> Varchar,
        name -> Text,
    }
}

table! {
    folders_ciphers (cipher_uuid, folder_uuid) {
        cipher_uuid -> Varchar,
        folder_uuid -> Varchar,
    }
}

table! {
    invitations (email) {
        email -> Varchar,
    }
}

table! {
    organizations (uuid) {
        uuid -> Varchar,
        name -> Text,
        billing_email -> Text,
    }
}

table! {
    twofactor (uuid) {
        uuid -> Varchar,
        user_uuid -> Varchar,
        atype -> Integer,
        enabled -> Bool,
        data -> Text,
    }
}

table! {
    users (uuid) {
        uuid -> Varchar,
        created_at -> Datetime,
        updated_at -> Datetime,
        email -> Varchar,
        name -> Text,
        password_hash -> Blob,
        salt -> Blob,
        password_iterations -> Integer,
        password_hint -> Nullable<Text>,
        akey -> Text,
        private_key -> Nullable<Text>,
        public_key -> Nullable<Text>,
        totp_secret -> Nullable<Text>,
        totp_recover -> Nullable<Text>,
        security_stamp -> Text,
        equivalent_domains -> Text,
        excluded_globals -> Text,
        client_kdf_type -> Integer,
        client_kdf_iter -> Integer,
    }
}

table! {
    users_collections (user_uuid, collection_uuid) {
        user_uuid -> Varchar,
        collection_uuid -> Varchar,
        read_only -> Bool,
    }
}

table! {
    users_organizations (uuid) {
        uuid -> Varchar,
        user_uuid -> Varchar,
        org_uuid -> Varchar,
        access_all -> Bool,
        akey -> Text,
        status -> Integer,
        atype -> Integer,
    }
}

joinable!(attachments -> ciphers (cipher_uuid));
joinable!(ciphers -> organizations (organization_uuid));
joinable!(ciphers -> users (user_uuid));
joinable!(ciphers_collections -> ciphers (cipher_uuid));
joinable!(ciphers_collections -> collections (collection_uuid));
joinable!(collections -> organizations (org_uuid));
joinable!(devices -> users (user_uuid));
joinable!(folders -> users (user_uuid));
joinable!(folders_ciphers -> ciphers (cipher_uuid));
joinable!(folders_ciphers -> folders (folder_uuid));
joinable!(twofactor -> users (user_uuid));
joinable!(users_collections -> collections (collection_uuid));
joinable!(users_collections -> users (user_uuid));
joinable!(users_organizations -> organizations (org_uuid));
joinable!(users_organizations -> users (user_uuid));

allow_tables_to_appear_in_same_query!(
    attachments,
    ciphers,
    ciphers_collections,
    collections,
    devices,
    folders,
    folders_ciphers,
    invitations,
    organizations,
    twofactor,
    users,
    users_collections,
    users_organizations,
);
@@ -4,7 +4,7 @@ table! {
cipher_uuid -> Text,
file_name -> Text,
file_size -> Integer,
key -> Nullable<Text>,
akey -> Nullable<Text>,
}
}

@@ -15,8 +15,7 @@ table! {
updated_at -> Timestamp,
user_uuid -> Nullable<Text>,
organization_uuid -> Nullable<Text>,
#[sql_name = "type"]
type_ -> Integer,
atype -> Integer,
name -> Text,
notes -> Nullable<Text>,
fields -> Nullable<Text>,
@@ -48,8 +47,7 @@ table! {
updated_at -> Timestamp,
user_uuid -> Text,
name -> Text,
#[sql_name = "type"]
type_ -> Integer,
atype -> Integer,
push_token -> Nullable<Text>,
refresh_token -> Text,
twofactor_remember -> Nullable<Text>,
@@ -91,8 +89,7 @@ table! {
twofactor (uuid) {
uuid -> Text,
user_uuid -> Text,
#[sql_name = "type"]
type_ -> Integer,
atype -> Integer,
enabled -> Bool,
data -> Text,
}
@@ -109,7 +106,7 @@ table! {
salt -> Binary,
password_iterations -> Integer,
password_hint -> Nullable<Text>,
key -> Text,
akey -> Text,
private_key -> Nullable<Text>,
public_key -> Nullable<Text>,
totp_secret -> Nullable<Text>,
@@ -136,10 +133,9 @@ table! {
user_uuid -> Text,
org_uuid -> Text,
access_all -> Bool,
key -> Text,
akey -> Text,
status -> Integer,
#[sql_name = "type"]
type_ -> Integer,
atype -> Integer,
}
}
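The schema-level effect of the rename, summarized: the SQLite schema previously mapped the Rust-safe identifier `type_` onto a column literally named `type` (a reserved word) via `#[sql_name]`; renaming the column itself to `atype` (and `key` to `akey`) lets the SQLite and MySQL schemas share plain declarations with no per-backend attributes. Before and after, as a sketch:

    // Before: Rust identifier mapped onto the reserved word "type".
    //     #[sql_name = "type"]
    //     type_ -> Integer,
    // After: a plain column name, identical in both backend schemas.
    //     atype -> Integer,
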
69  src/error.rs
@@ -4,17 +4,19 @@
use std::error::Error as StdError;

macro_rules! make_error {
    ( $( $name:ident ( $ty:ty ): $src_fn:expr, $usr_msg_fun:expr ),+ $(,)* ) => {
    ( $( $name:ident ( $ty:ty ): $src_fn:expr, $usr_msg_fun:expr ),+ $(,)? ) => {
        const BAD_REQUEST: u16 = 400;

        #[derive(Display)]
        enum ErrorKind { $($name( $ty )),+ }
        pub struct Error { message: String, error: ErrorKind }
        pub enum ErrorKind { $($name( $ty )),+ }
        pub struct Error { message: String, error: ErrorKind, error_code: u16 }

        $(impl From<$ty> for Error {
            fn from(err: $ty) -> Self { Error::from((stringify!($name), err)) }
        })+
        $(impl<S: Into<String>> From<(S, $ty)> for Error {
            fn from(val: (S, $ty)) -> Self {
                Error { message: val.0.into(), error: ErrorKind::$name(val.1) }
                Error { message: val.0.into(), error: ErrorKind::$name(val.1), error_code: BAD_REQUEST }
            }
        })+
        impl StdError for Error {
@@ -32,11 +34,21 @@ macro_rules! make_error {
    };
}

use diesel::result::Error as DieselError;
use jsonwebtoken::errors::Error as JwtError;
use serde_json::{Error as SerError, Value};
use std::io::Error as IOError;
use diesel::result::Error as DieselErr;
use handlebars::RenderError as HbErr;
use jsonwebtoken::errors::Error as JWTErr;
use regex::Error as RegexErr;
use reqwest::Error as ReqErr;
use serde_json::{Error as SerdeErr, Value};
use std::io::Error as IOErr;

use std::option::NoneError as NoneErr;
use std::time::SystemTimeError as TimeErr;
use u2f::u2ferror::U2fError as U2fErr;
use yubico::yubicoerror::YubicoError as YubiErr;

#[derive(Display, Serialize)]
pub struct Empty {}

// Error struct
// Contains a String error message, meant for the user, and an enum variant with an error of different types.
@@ -44,16 +56,30 @@ use u2f::u2ferror::U2fError as U2fErr;
// After the variant itself, there are two expressions. The first one indicates whether the error contains a source error (that we pretty print).
// The second one contains the function used to obtain the response sent to the client
make_error! {
    // Just an empty error
    EmptyError(Empty): _no_source, _serialize,
    // Used to represent err! calls
    SimpleError(String): _no_source, _api_error,
    // Used for special return values, like 2FA errors
    JsonError(Value): _no_source, _serialize,
    DbError(DieselError): _has_source, _api_error,
    DbError(DieselErr): _has_source, _api_error,
    U2fError(U2fErr): _has_source, _api_error,
    SerdeError(SerError): _has_source, _api_error,
    JWTError(JwtError): _has_source, _api_error,
    IoErrror(IOError): _has_source, _api_error,
    SerdeError(SerdeErr): _has_source, _api_error,
    JWTError(JWTErr): _has_source, _api_error,
    TemplError(HbErr): _has_source, _api_error,
    //WsError(ws::Error): _has_source, _api_error,
    IOError(IOErr): _has_source, _api_error,
    TimeError(TimeErr): _has_source, _api_error,
    ReqError(ReqErr): _has_source, _api_error,
    RegexError(RegexErr): _has_source, _api_error,
    YubiError(YubiErr): _has_source, _api_error,
}

// This is implemented by hand because NoneError implements neither Display nor Error
impl From<NoneErr> for Error {
    fn from(_: NoneErr) -> Self {
        Error::from(("NoneError", String::new()))
    }
}

impl std::fmt::Debug for Error {
@@ -70,10 +96,19 @@ impl Error {
    (usr_msg, log_msg.into()).into()
}

pub fn empty() -> Self {
    Empty {}.into()
}

pub fn with_msg<M: Into<String>>(mut self, msg: M) -> Self {
    self.message = msg.into();
    self
}

pub fn with_code(mut self, code: u16) -> Self {
    self.error_code = code;
    self
}
}

pub trait MapResult<S> {
@@ -92,6 +127,12 @@ impl<E: Into<Error>> MapResult<()> for Result<usize, E> {
    }
}

impl<S> MapResult<S> for Option<S> {
    fn map_res(self, msg: &str) -> Result<S, Error> {
        self.ok_or_else(|| Error::new(msg, ""))
    }
}

fn _has_source<T>(e: T) -> Option<T> {
    Some(e)
}
@@ -132,8 +173,10 @@ impl<'r> Responder<'r> for Error {
    let usr_msg = format!("{}", self);
    error!("{:#?}", self);

    let code = Status::from_code(self.error_code).unwrap_or(Status::BadRequest);

    Response::build()
        .status(Status::BadRequest)
        .status(code)
        .header(ContentType::JSON)
        .sized_body(Cursor::new(usr_msg))
        .ok()
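With `error_code` on the struct, callers can override the default 400 per error before it reaches the Responder; for example (illustrative message and code):

    // Return a 404 instead of the default 400 Bad Request:
    return Err(Error::new("Resource not found", "missing cipher").with_code(404));
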
236  src/mail.rs
@@ -1,59 +1,95 @@
|
||||
use lettre::smtp::authentication::Credentials;
|
||||
use lettre::smtp::authentication::Mechanism as SmtpAuthMechanism;
|
||||
use lettre::smtp::ConnectionReuseParameters;
|
||||
use lettre::{ClientSecurity, ClientTlsParameters, SmtpClient, SmtpTransport, Transport};
|
||||
use lettre_email::EmailBuilder;
|
||||
use lettre_email::{EmailBuilder, MimeMultipartType, PartBuilder};
|
||||
use native_tls::{Protocol, TlsConnector};
|
||||
use percent_encoding::{percent_encode, NON_ALPHANUMERIC};
|
||||
use quoted_printable::encode_to_str;
|
||||
|
||||
use crate::MailConfig;
|
||||
use crate::CONFIG;
|
||||
use crate::auth::{generate_invite_claims, encode_jwt};
|
||||
use crate::api::EmptyResult;
|
||||
use crate::auth::{encode_jwt, generate_invite_claims};
|
||||
use crate::error::Error;
|
||||
use crate::CONFIG;
|
||||
use chrono::NaiveDateTime;
|
||||
|
||||
fn mailer(config: &MailConfig) -> SmtpTransport {
|
||||
let client_security = if config.smtp_ssl {
|
||||
fn mailer() -> SmtpTransport {
|
||||
let host = CONFIG.smtp_host().unwrap();
|
||||
|
||||
let client_security = if CONFIG.smtp_ssl() {
|
||||
let tls = TlsConnector::builder()
|
||||
.min_protocol_version(Some(Protocol::Tlsv11))
|
||||
.build()
|
||||
.unwrap();
|
||||
|
||||
ClientSecurity::Required(ClientTlsParameters::new(config.smtp_host.clone(), tls))
|
||||
let params = ClientTlsParameters::new(host.clone(), tls);
|
||||
|
||||
if CONFIG.smtp_explicit_tls() {
|
||||
ClientSecurity::Wrapper(params)
|
||||
} else {
|
||||
ClientSecurity::Required(params)
|
||||
}
|
||||
} else {
|
||||
ClientSecurity::None
|
||||
};
|
||||
|
||||
let smtp_client = SmtpClient::new((config.smtp_host.as_str(), config.smtp_port), client_security).unwrap();
|
||||
let smtp_client = SmtpClient::new((host.as_str(), CONFIG.smtp_port()), client_security).unwrap();
|
||||
|
||||
let smtp_client = match (&config.smtp_username, &config.smtp_password) {
|
||||
let smtp_client = match (&CONFIG.smtp_username(), &CONFIG.smtp_password()) {
|
||||
(Some(user), Some(pass)) => smtp_client.credentials(Credentials::new(user.clone(), pass.clone())),
|
||||
_ => smtp_client,
|
||||
};
|
||||
|
||||
let smtp_client = match &CONFIG.smtp_auth_mechanism() {
|
||||
Some(auth_mechanism_json) => {
|
||||
let auth_mechanism = serde_json::from_str::<SmtpAuthMechanism>(&auth_mechanism_json);
|
||||
match auth_mechanism {
|
||||
Ok(auth_mechanism) => smtp_client.authentication_mechanism(auth_mechanism),
|
||||
Err(_) => panic!("Failure to parse mechanism. Is it proper Json? Eg. `\"Plain\"` not `Plain`"),
|
||||
}
|
||||
},
|
||||
_ => smtp_client,
|
||||
};
|
||||
|
||||
smtp_client
|
||||
.smtp_utf8(true)
|
||||
.connection_reuse(ConnectionReuseParameters::NoReuse)
|
||||
.transport()
|
||||
}
|
||||
|
||||
pub fn send_password_hint(address: &str, hint: Option<String>, config: &MailConfig) -> EmptyResult {
|
||||
let (subject, body) = if let Some(hint) = hint {
|
||||
(
|
||||
"Your master password hint",
|
||||
format!(
|
||||
"You (or someone) recently requested your master password hint.\n\n\
|
||||
Your hint is: \"{}\"\n\n\
|
||||
If you did not request your master password hint you can safely ignore this email.\n",
|
||||
hint
|
||||
),
|
||||
)
|
||||
} else {
|
||||
(
|
||||
"Sorry, you have no password hint...",
|
||||
"Sorry, you have not specified any password hint...\n".into(),
|
||||
)
|
||||
fn get_text(template_name: &'static str, data: serde_json::Value) -> Result<(String, String, String), Error> {
|
||||
let (subject_html, body_html) = get_template(&format!("{}.html", template_name), &data)?;
|
||||
let (_subject_text, body_text) = get_template(template_name, &data)?;
|
||||
Ok((subject_html, body_html, body_text))
|
||||
}
|
||||
|
||||
fn get_template(template_name: &str, data: &serde_json::Value) -> Result<(String, String), Error> {
|
||||
let text = CONFIG.render_template(template_name, data)?;
|
||||
let mut text_split = text.split("<!---------------->");
|
||||
|
||||
let subject = match text_split.next() {
|
||||
Some(s) => s.trim().to_string(),
|
||||
None => err!("Template doesn't contain subject"),
|
||||
};
|
||||
|
||||
send_email(&address, &subject, &body, &config)
|
||||
let body = match text_split.next() {
|
||||
Some(s) => s.trim().to_string(),
|
||||
None => err!("Template doesn't contain body"),
|
||||
};
|
||||
|
||||
Ok((subject, body))
|
||||
}
|
||||
|
||||
pub fn send_password_hint(address: &str, hint: Option<String>) -> EmptyResult {
|
||||
let template_name = if hint.is_some() {
|
||||
"email/pw_hint_some"
|
||||
} else {
|
||||
"email/pw_hint_none"
|
||||
};
|
||||
|
||||
let (subject, body_html, body_text) = get_text(template_name, json!({ "hint": hint, "url": CONFIG.domain() }))?;
|
||||
|
||||
send_email(&address, &subject, &body_html, &body_text)
|
||||
}
|
||||
|
||||
pub fn send_invite(
|
||||
@@ -63,77 +99,109 @@ pub fn send_invite(
|
||||
org_user_id: Option<String>,
|
||||
org_name: &str,
|
||||
invited_by_email: Option<String>,
|
||||
config: &MailConfig,
|
||||
) -> EmptyResult {
|
||||
let claims = generate_invite_claims(
|
||||
uuid.to_string(),
|
||||
String::from(address),
|
||||
org_id.clone(),
|
||||
org_user_id.clone(),
|
||||
invited_by_email.clone(),
|
||||
);
|
||||
uuid.to_string(),
|
||||
String::from(address),
|
||||
org_id.clone(),
|
||||
org_user_id.clone(),
|
||||
invited_by_email.clone(),
|
||||
);
|
||||
let invite_token = encode_jwt(&claims);
|
||||
let (subject, body) = {
|
||||
(format!("Join {}", &org_name),
|
||||
format!(
|
||||
"<html>
|
||||
<p>You have been invited to join the <b>{}</b> organization.<br><br>
|
||||
<a href=\"{}/#/accept-organization/?organizationId={}&organizationUserId={}&email={}&organizationName={}&token={}\">
|
||||
Click here to join</a></p>
|
||||
<p>If you do not wish to join this organization, you can safely ignore this email.</p>
|
||||
</html>",
|
||||
org_name, CONFIG.domain, org_id.unwrap_or("_".to_string()), org_user_id.unwrap_or("_".to_string()), address, org_name, invite_token
|
||||
))
|
||||
};
|
||||
|
||||
send_email(&address, &subject, &body, &config)
|
||||
let (subject, body_html, body_text) = get_text(
|
||||
"email/send_org_invite",
|
||||
json!({
|
||||
"url": CONFIG.domain(),
|
||||
"org_id": org_id.unwrap_or_else(|| "_".to_string()),
|
||||
"org_user_id": org_user_id.unwrap_or_else(|| "_".to_string()),
|
||||
"email": percent_encode(address.as_bytes(), NON_ALPHANUMERIC).to_string(),
|
||||
"org_name": org_name,
|
||||
"token": invite_token,
|
||||
}),
|
||||
)?;
|
||||
|
||||
send_email(&address, &subject, &body_html, &body_text)
|
||||
}
|
||||
|
||||
pub fn send_invite_accepted(
|
||||
new_user_email: &str,
|
||||
address: &str,
|
||||
org_name: &str,
|
||||
config: &MailConfig,
|
||||
) -> EmptyResult {
|
||||
let (subject, body) = {
|
||||
("Invitation accepted",
|
||||
format!(
|
||||
"<html>
|
||||
<p>Your invitation for <b>{}</b> to join <b>{}</b> was accepted. Please <a href=\"{}\">log in</a> to the bitwarden_rs server and confirm them from the organization management page.</p>
|
||||
</html>", new_user_email, org_name, CONFIG.domain))
|
||||
};
|
||||
pub fn send_invite_accepted(new_user_email: &str, address: &str, org_name: &str) -> EmptyResult {
|
||||
let (subject, body_html, body_text) = get_text(
|
||||
"email/invite_accepted",
|
||||
json!({
|
||||
"url": CONFIG.domain(),
|
||||
"email": new_user_email,
|
||||
"org_name": org_name,
|
||||
}),
|
||||
)?;
|
||||
|
||||
send_email(&address, &subject, &body, &config)
|
||||
send_email(&address, &subject, &body_html, &body_text)
|
||||
}
|
||||
|
||||
pub fn send_invite_confirmed(
|
||||
address: &str,
|
||||
org_name: &str,
|
||||
config: &MailConfig,
|
||||
) -> EmptyResult {
|
||||
let (subject, body) = {
|
||||
(format!("Invitation to {} confirmed", org_name),
|
||||
format!(
|
||||
"<html>
|
||||
<p>Your invitation to join <b>{}</b> was confirmed. It will now appear under the Organizations the next time you <a href=\"{}\">log in</a> to the web vault.</p>
|
||||
</html>", org_name, CONFIG.domain))
|
||||
};
|
||||
pub fn send_invite_confirmed(address: &str, org_name: &str) -> EmptyResult {
|
||||
let (subject, body_html, body_text) = get_text(
|
||||
"email/invite_confirmed",
|
||||
json!({
|
||||
"url": CONFIG.domain(),
|
||||
"org_name": org_name,
|
||||
}),
|
||||
)?;
|
||||
|
||||
send_email(&address, &subject, &body, &config)
|
||||
send_email(&address, &subject, &body_html, &body_text)
|
||||
}
|
||||
|
||||
fn send_email(address: &str, subject: &str, body: &str, config: &MailConfig) -> EmptyResult {
|
||||
pub fn send_new_device_logged_in(address: &str, ip: &str, dt: &NaiveDateTime, device: &str) -> EmptyResult {
|
||||
use crate::util::upcase_first;
|
||||
let device = upcase_first(device);
|
||||
|
||||
let datetime = dt.format("%A, %B %_d, %Y at %H:%M").to_string();
|
||||
|
||||
let (subject, body_html, body_text) = get_text(
|
||||
"email/new_device_logged_in",
|
||||
json!({
|
||||
"url": CONFIG.domain(),
|
||||
"ip": ip,
|
||||
"device": device,
|
||||
"datetime": datetime,
|
||||
}),
|
||||
)?;
|
||||
|
||||
send_email(&address, &subject, &body_html, &body_text)
|
||||
}

+fn send_email(address: &str, subject: &str, body_html: &str, body_text: &str) -> EmptyResult {
+    let html = PartBuilder::new()
+        .body(encode_to_str(body_html))
+        .header(("Content-Type", "text/html; charset=utf-8"))
+        .header(("Content-Transfer-Encoding", "quoted-printable"))
+        .build();
+
+    let text = PartBuilder::new()
+        .body(encode_to_str(body_text))
+        .header(("Content-Type", "text/plain; charset=utf-8"))
+        .header(("Content-Transfer-Encoding", "quoted-printable"))
+        .build();
+
+    let alternative = PartBuilder::new()
+        .message_type(MimeMultipartType::Alternative)
+        .child(text)
+        .child(html);

     let email = EmailBuilder::new()
-        .to(address)
-        .from((config.smtp_from.clone(), "Bitwarden-rs"))
-        .subject(subject)
-        .header(("Content-Type", "text/html"))
-        .body(body)
-        .build()
-        .map_err(|e| Error::new("Error building email", e.to_string()))?;
+        .to(address)
+        .from((CONFIG.smtp_from().as_str(), CONFIG.smtp_from_name().as_str()))
+        .subject(subject)
+        .child(alternative.build())
+        .build()
+        .map_err(|e| Error::new("Error building email", e.to_string()))?;

-    mailer(config)
+    let mut transport = mailer();
+
+    let result = transport
         .send(email.into())
         .map_err(|e| Error::new("Error sending email", e.to_string()))
-        .and(Ok(()))
-}
+        .and(Ok(()));
+
+    // Explicitly close the connection, in case of error
+    transport.close();
+
+    result
+}
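
(Two notes on the new send_email: the text part is added to the multipart/alternative container before the html part because MIME clients prefer the last alternative they can render, so HTML-capable clients show the rich body while text-only clients fall back to the plain version. And the EmptyResult type threaded through all of these functions is, conceptually, just a payload-less result over the crate's Error type — a sketch of the alias, assuming that error module:)

type EmptyResult = Result<(), Error>;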

src/main.rs (350 changed lines)
@@ -1,6 +1,5 @@
 #![feature(proc_macro_hygiene, decl_macro, vec_remove_item, try_trait)]
-#![recursion_limit = "128"]
-#![allow(proc_macro_derive_resolution_fallback)] // TODO: Remove this when diesel update fixes warnings
+#![recursion_limit = "256"]

 #[macro_use]
 extern crate rocket;
@@ -21,7 +20,6 @@ extern crate derive_more;
 #[macro_use]
 extern crate num_derive;

-use rocket::Rocket;
 use std::{
     path::Path,
     process::{exit, Command},
@@ -31,42 +29,19 @@ use std::{
 mod error;
 mod api;
 mod auth;
+mod config;
 mod crypto;
 mod db;
 mod mail;
 mod util;

-fn init_rocket() -> Rocket {
-    rocket::ignite()
-        .mount("/", api::web_routes())
-        .mount("/api", api::core_routes())
-        .mount("/admin", api::admin_routes())
-        .mount("/identity", api::identity_routes())
-        .mount("/icons", api::icons_routes())
-        .mount("/notifications", api::notifications_routes())
-        .manage(db::init_pool())
-        .manage(api::start_notification_server())
-        .attach(util::AppHeaders())
-}
-
-// Embed the migrations from the migrations folder into the application
-// This way, the program automatically migrates the database to the latest version
-// https://docs.rs/diesel_migrations/*/diesel_migrations/macro.embed_migrations.html
-#[allow(unused_imports)]
-mod migrations {
-    embed_migrations!();
-
-    pub fn run_migrations() {
-        // Make sure the database is up to date (create if it doesn't exist, or run the migrations)
-        let connection = crate::db::get_connection().expect("Can't conect to DB");
-
-        use std::io::stdout;
-        embedded_migrations::run_with_output(&connection, &mut stdout()).expect("Can't run migrations");
-    }
-}
+pub use config::CONFIG;
+pub use error::{Error, MapResult};

 fn main() {
-    if CONFIG.extended_logging {
+    launch_info();
+
+    if CONFIG.extended_logging() {
         init_logging().ok();
     }
@@ -75,10 +50,26 @@ fn main() {
     check_web_vault();
     migrations::run_migrations();

-    init_rocket().launch();
+    launch_rocket();
 }

+fn launch_info() {
+    println!("/--------------------------------------------------------------------\\");
+    println!("|                       Starting Bitwarden_RS                        |");
+
+    if let Some(version) = option_env!("GIT_VERSION") {
+        println!("|{:^68}|", format!("Version {}", version));
+    }
+
+    println!("|--------------------------------------------------------------------|");
+    println!("| This is an *unofficial* Bitwarden implementation, DO NOT use the   |");
+    println!("| official channels to report bugs/features, regardless of client.   |");
+    println!("| Report URL: https://github.com/dani-garcia/bitwarden_rs/issues/new |");
+    println!("\\--------------------------------------------------------------------/\n");
+}

 fn init_logging() -> Result<(), fern::InitError> {
+    use std::str::FromStr;
     let mut logger = fern::Dispatch::new()
         .format(|out, message, record| {
             out.finish(format_args!(
@@ -89,28 +80,30 @@ fn init_logging() -> Result<(), fern::InitError> {
                 message
             ))
         })
-        .level(log::LevelFilter::Debug)
-        .level_for("hyper", log::LevelFilter::Warn)
-        .level_for("ws", log::LevelFilter::Info)
-        .level_for("multipart", log::LevelFilter::Info)
+        .level(log::LevelFilter::from_str(&CONFIG.log_level()).expect("Valid log level"))
+        // Hide unknown certificate errors if using self-signed
+        .level_for("rustls::session", log::LevelFilter::Off)
+        // Hide failed to close stream messages
+        .level_for("hyper::server", log::LevelFilter::Warn)
         .chain(std::io::stdout());

-    if let Some(log_file) = CONFIG.log_file.as_ref() {
+    if let Some(log_file) = CONFIG.log_file() {
         logger = logger.chain(fern::log_file(log_file)?);
     }

-    logger = chain_syslog(logger);
+    #[cfg(not(windows))]
+    {
+        if cfg!(feature = "enable_syslog") || CONFIG.use_syslog() {
+            logger = chain_syslog(logger);
+        }
+    }

     logger.apply()?;

     Ok(())
 }

 #[cfg(not(feature = "enable_syslog"))]
 fn chain_syslog(logger: fern::Dispatch) -> fern::Dispatch {
     logger
 }

-#[cfg(feature = "enable_syslog")]
+#[cfg(not(windows))]
 fn chain_syslog(logger: fern::Dispatch) -> fern::Dispatch {
     let syslog_fmt = syslog::Formatter3164 {
         facility: syslog::Facility::LOG_USER,
@@ -129,72 +122,65 @@ fn chain_syslog(logger: fern::Dispatch) -> fern::Dispatch {
 }

 fn check_db() {
-    let path = Path::new(&CONFIG.database_url);
+    if cfg!(feature = "sqlite") {
+        let url = CONFIG.database_url();
+        let path = Path::new(&url);

-    if let Some(parent) = path.parent() {
-        use std::fs;
-        if fs::create_dir_all(parent).is_err() {
-            error!("Error creating database directory");
-            exit(1);
+        if let Some(parent) = path.parent() {
+            use std::fs;
+            if fs::create_dir_all(parent).is_err() {
+                error!("Error creating database directory");
+                exit(1);
+            }
+        }

+        // Turn on WAL in SQLite
+        if CONFIG.enable_db_wal() {
+            use diesel::RunQueryDsl;
+            let connection = db::get_connection().expect("Can't conect to DB");
+            diesel::sql_query("PRAGMA journal_mode=wal")
+                .execute(&connection)
+                .expect("Failed to turn on WAL");
+        }
+    }

-    // Turn on WAL in SQLite
-    use diesel::RunQueryDsl;
-    let connection = db::get_connection().expect("Can't conect to DB");
-    diesel::sql_query("PRAGMA journal_mode=wal")
-        .execute(&connection)
-        .expect("Failed to turn on WAL");
+    db::get_connection().expect("Can't connect to DB");
 }
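
(Side note on the WAL change: SQLite stores the journal mode in the database file itself, so the PRAGMA only needs to succeed once and re-running it at every startup is harmless. The new ENABLE_DB_WAL gate presumably exists because WAL mode is known to misbehave on network filesystems.)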

 fn check_rsa_keys() {
     // If the RSA keys don't exist, try to create them
-    if !util::file_exists(&CONFIG.private_rsa_key) || !util::file_exists(&CONFIG.public_rsa_key) {
+    if !util::file_exists(&CONFIG.private_rsa_key()) || !util::file_exists(&CONFIG.public_rsa_key()) {
         info!("JWT keys don't exist, checking if OpenSSL is available...");

-        Command::new("openssl").arg("version").output().unwrap_or_else(|_| {
+        Command::new("openssl").arg("version").status().unwrap_or_else(|_| {
             info!("Can't create keys because OpenSSL is not available, make sure it's installed and available on the PATH");
             exit(1);
         });

         info!("OpenSSL detected, creating keys...");

+        let key = CONFIG.rsa_key_filename();
+
+        let pem = format!("{}.pem", key);
+        let priv_der = format!("{}.der", key);
+        let pub_der = format!("{}.pub.der", key);

         let mut success = Command::new("openssl")
-            .arg("genrsa")
-            .arg("-out")
-            .arg(&CONFIG.private_rsa_key_pem)
-            .output()
+            .args(&["genrsa", "-out", &pem])
+            .status()
             .expect("Failed to create private pem file")
-            .status
             .success();

         success &= Command::new("openssl")
-            .arg("rsa")
-            .arg("-in")
-            .arg(&CONFIG.private_rsa_key_pem)
-            .arg("-outform")
-            .arg("DER")
-            .arg("-out")
-            .arg(&CONFIG.private_rsa_key)
-            .output()
+            .args(&["rsa", "-in", &pem, "-outform", "DER", "-out", &priv_der])
+            .status()
             .expect("Failed to create private der file")
-            .status
             .success();

         success &= Command::new("openssl")
-            .arg("rsa")
-            .arg("-in")
-            .arg(&CONFIG.private_rsa_key)
-            .arg("-inform")
-            .arg("DER")
-            .arg("-RSAPublicKey_out")
-            .arg("-outform")
-            .arg("DER")
-            .arg("-out")
-            .arg(&CONFIG.public_rsa_key)
-            .output()
+            .args(&["rsa", "-in", &priv_der, "-inform", "DER"])
+            .args(&["-RSAPublicKey_out", "-outform", "DER", "-out", &pub_der])
+            .status()
             .expect("Failed to create public der file")
-            .status
             .success();

         if success {
@@ -207,168 +193,66 @@ fn check_rsa_keys() {
 }

 fn check_web_vault() {
-    if !CONFIG.web_vault_enabled {
+    if !CONFIG.web_vault_enabled() {
         return;
     }

-    let index_path = Path::new(&CONFIG.web_vault_folder).join("index.html");
+    let index_path = Path::new(&CONFIG.web_vault_folder()).join("index.html");

     if !index_path.exists() {
-        error!("Web vault is not found. Please follow the steps in the README to install it");
+        error!("Web vault is not found. To install it, please follow the steps in https://github.com/dani-garcia/bitwarden_rs/wiki/Building-binary#install-the-web-vault");
         exit(1);
     }
 }

-lazy_static! {
-    // Load the config from .env or from environment variables
-    static ref CONFIG: Config = Config::load();
-}
+// Embed the migrations from the migrations folder into the application
+// This way, the program automatically migrates the database to the latest version
+// https://docs.rs/diesel_migrations/*/diesel_migrations/macro.embed_migrations.html
+#[allow(unused_imports)]
+mod migrations {

-#[derive(Debug)]
-pub struct MailConfig {
-    smtp_host: String,
-    smtp_port: u16,
-    smtp_ssl: bool,
-    smtp_from: String,
-    smtp_username: Option<String>,
-    smtp_password: Option<String>,
-}
+    #[cfg(feature = "sqlite")]
+    embed_migrations!("migrations/sqlite");
+    #[cfg(feature = "mysql")]
+    embed_migrations!("migrations/mysql");

-impl MailConfig {
-    fn load() -> Option<Self> {
-        use crate::util::{get_env, get_env_or};
+    pub fn run_migrations() {
+        // Make sure the database is up to date (create if it doesn't exist, or run the migrations)
+        let connection = crate::db::get_connection().expect("Can't connect to DB");

-        // When SMTP_HOST is absent, we assume the user does not want to enable it.
-        let smtp_host = match get_env("SMTP_HOST") {
-            Some(host) => host,
-            None => return None,
-        };
-
-        let smtp_from = get_env("SMTP_FROM").unwrap_or_else(|| {
-            error!("Please specify SMTP_FROM to enable SMTP support.");
-            exit(1);
-        });
-
-        let smtp_ssl = get_env_or("SMTP_SSL", true);
-        let smtp_port = get_env("SMTP_PORT").unwrap_or_else(|| if smtp_ssl { 587u16 } else { 25u16 });
-
-        let smtp_username = get_env("SMTP_USERNAME");
-        let smtp_password = get_env("SMTP_PASSWORD").or_else(|| {
-            if smtp_username.as_ref().is_some() {
-                error!("SMTP_PASSWORD is mandatory when specifying SMTP_USERNAME.");
-                exit(1);
-            } else {
-                None
-            }
-        });
-
-        Some(MailConfig {
-            smtp_host,
-            smtp_port,
-            smtp_ssl,
-            smtp_from,
-            smtp_username,
-            smtp_password,
-        })
+        use std::io::stdout;
+        embedded_migrations::run_with_output(&connection, &mut stdout()).expect("Can't run migrations");
     }
 }
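
(MailConfig::load above leans on two small helpers from crate::util, get_env and get_env_or. Their bodies are outside this diff; a plausible sketch — an assumption, not the project's code — parses environment variables into any FromStr type:)

use std::env;
use std::str::FromStr;

// Sketch: read an environment variable and parse it, or None if unset/unparsable.
fn get_env<V: FromStr>(key: &str) -> Option<V> {
    env::var(key).ok().and_then(|v| v.parse().ok())
}

// Sketch: same, but fall back to a default, e.g. get_env_or("SMTP_SSL", true).
fn get_env_or<V: FromStr>(key: &str, default: V) -> V {
    get_env(key).unwrap_or(default)
}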

-#[derive(Debug)]
-pub struct Config {
-    database_url: String,
-    icon_cache_folder: String,
-    attachments_folder: String,
+fn launch_rocket() {
+    // Create Rocket object, this stores current log level and sets its own
+    let rocket = rocket::ignite();
+
-    icon_cache_ttl: u64,
-    icon_cache_negttl: u64,
-
-    private_rsa_key: String,
-    private_rsa_key_pem: String,
-    public_rsa_key: String,
-
-    web_vault_folder: String,
-    web_vault_enabled: bool,
-
-    websocket_enabled: bool,
-    websocket_url: String,
-
-    extended_logging: bool,
-    log_file: Option<String>,
-
-    local_icon_extractor: bool,
-    signups_allowed: bool,
-    invitations_allowed: bool,
-    admin_token: Option<String>,
-    password_iterations: i32,
-    show_password_hint: bool,
-
-    domain: String,
-    domain_set: bool,
-
-    yubico_cred_set: bool,
-    yubico_client_id: String,
-    yubico_secret_key: String,
-    yubico_server: Option<String>,
-
-    mail: Option<MailConfig>,
-}
-
-impl Config {
-    fn load() -> Self {
-        use crate::util::{get_env, get_env_or};
-        dotenv::dotenv().ok();
-
-        let df = get_env_or("DATA_FOLDER", "data".to_string());
-        let key = get_env_or("RSA_KEY_FILENAME", format!("{}/{}", &df, "rsa_key"));
-
-        let domain = get_env("DOMAIN");
-
-        let yubico_client_id = get_env("YUBICO_CLIENT_ID");
-        let yubico_secret_key = get_env("YUBICO_SECRET_KEY");
-
-        Config {
-            database_url: get_env_or("DATABASE_URL", format!("{}/{}", &df, "db.sqlite3")),
-            icon_cache_folder: get_env_or("ICON_CACHE_FOLDER", format!("{}/{}", &df, "icon_cache")),
-            attachments_folder: get_env_or("ATTACHMENTS_FOLDER", format!("{}/{}", &df, "attachments")),
-
-            // icon_cache_ttl defaults to 30 days (30 * 24 * 60 * 60 seconds)
-            icon_cache_ttl: get_env_or("ICON_CACHE_TTL", 2_592_000),
-            // icon_cache_negttl defaults to 3 days (3 * 24 * 60 * 60 seconds)
-            icon_cache_negttl: get_env_or("ICON_CACHE_NEGTTL", 259_200),
-
-            private_rsa_key: format!("{}.der", &key),
-            private_rsa_key_pem: format!("{}.pem", &key),
-            public_rsa_key: format!("{}.pub.der", &key),
-
-            web_vault_folder: get_env_or("WEB_VAULT_FOLDER", "web-vault/".into()),
-            web_vault_enabled: get_env_or("WEB_VAULT_ENABLED", true),
-
-            websocket_enabled: get_env_or("WEBSOCKET_ENABLED", false),
-            websocket_url: format!(
-                "{}:{}",
-                get_env_or("WEBSOCKET_ADDRESS", "0.0.0.0".to_string()),
-                get_env_or("WEBSOCKET_PORT", 3012)
-            ),
-
-            extended_logging: get_env_or("EXTENDED_LOGGING", true),
-            log_file: get_env("LOG_FILE"),
-
-            local_icon_extractor: get_env_or("LOCAL_ICON_EXTRACTOR", false),
-            signups_allowed: get_env_or("SIGNUPS_ALLOWED", true),
-            admin_token: get_env("ADMIN_TOKEN"),
-            invitations_allowed: get_env_or("INVITATIONS_ALLOWED", true),
-            password_iterations: get_env_or("PASSWORD_ITERATIONS", 100_000),
-            show_password_hint: get_env_or("SHOW_PASSWORD_HINT", true),
-
-            domain_set: domain.is_some(),
-            domain: domain.unwrap_or("http://localhost".into()),
-
-            yubico_cred_set: yubico_client_id.is_some() && yubico_secret_key.is_some(),
-            yubico_client_id: yubico_client_id.unwrap_or("00000".into()),
-            yubico_secret_key: yubico_secret_key.unwrap_or("AAAAAAA".into()),
-            yubico_server: get_env("YUBICO_SERVER"),
-
-            mail: MailConfig::load(),
-        }
+    // If we aren't logging the mounts, we force the logging level down
+    if !CONFIG.log_mounts() {
+        log::set_max_level(log::LevelFilter::Warn);
+    }
+
+    let rocket = rocket
+        .mount("/", api::web_routes())
+        .mount("/api", api::core_routes())
+        .mount("/admin", api::admin_routes())
+        .mount("/identity", api::identity_routes())
+        .mount("/icons", api::icons_routes())
+        .mount("/notifications", api::notifications_routes());
+
+    // Force the level up for the fairings, managed state and launch
+    if !CONFIG.log_mounts() {
+        log::set_max_level(log::LevelFilter::max());
+    }
+
+    let rocket = rocket
+        .manage(db::init_pool())
+        .manage(api::start_notification_server())
+        .attach(util::AppHeaders());
+
+    // Launch and print error if there is one
+    // The launch will restore the original logging level
+    error!("Launch error {:#?}", rocket.launch());
+}
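
(The log_mounts dance in launch_rocket works because log::set_max_level is a cheap global toggle: lower it while Rocket mounts routes, each of which logs an info line, then raise it again for the fairings, managed state and lift-off. The same pattern in miniature, as a sketch rather than project code:)

// Run a noisy closure with logging clamped to warnings, then restore verbosity.
fn quiet<T>(noisy: impl FnOnce() -> T) -> T {
    log::set_max_level(log::LevelFilter::Warn);
    let result = noisy();
    log::set_max_level(log::LevelFilter::max());
    result
}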

@@ -1,195 +0,0 @@
-<!DOCTYPE html>
-<html lang="en">
-
-<head>
-    <meta http-equiv="content-type" content="text/html; charset=UTF-8">
-    <meta name="viewport" content="width=device-width, initial-scale=1, shrink-to-fit=no">
-    <meta name="description" content="">
-    <meta name="author" content="">
-    <title>Bitwarden_rs Admin Panel</title>
-
-    <link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/twitter-bootstrap/4.1.3/css/bootstrap.min.css"
-        integrity="sha256-eSi1q2PG6J7g7ib17yAaWMcrr5GrtohYChqibrV7PBE=" crossorigin="anonymous" />
-    <script src="https://cdnjs.cloudflare.com/ajax/libs/jquery/3.3.1/jquery.min.js" integrity="sha256-FgpCb/KJQlLNfOu91ta32o/NMZxltwRo8QtmkMRdAu8="
-        crossorigin="anonymous"></script>
-    <script src="https://cdnjs.cloudflare.com/ajax/libs/blueimp-md5/2.10.0/js/md5.js" integrity="sha256-tCQ/BldMlN2vWe5gAiNoNb5svoOgVUhlUgv7UjONKKQ="
-        crossorigin="anonymous"></script>
-    <script src="https://cdnjs.cloudflare.com/ajax/libs/identicon.js/2.3.3/identicon.min.js" integrity="sha256-nYoL3nK/HA1e1pJvLwNPnpKuKG9q89VFX862r5aohmA="
-        crossorigin="anonymous"></script>
-
-    <style>
-        body { padding-top: 70px; }
-        img { width: 48px; height: 48px; }
-    </style>
-
-    <script>
-        let key = null;
-
-        function identicon(email) {
-            const data = new Identicon(md5(email), {
-                size: 48, format: 'svg'
-            }).toString();
-            return "data:image/svg+xml;base64," + data;
-        }
-
-        function setVis(elem, vis) {
-            if (vis) { $(elem).removeClass('d-none'); }
-            else { $(elem).addClass('d-none'); }
-        }
-
-        function updateVis() {
-            setVis("#no-key-form", !key);
-            setVis("#users-block", key);
-            setVis("#invite-form-block", key);
-        }
-
-        function setKey() {
-            key = $('#key').val() || window.location.hash.slice(1);
-            updateVis();
-            if (key) { loadUsers(); }
-            return false;
-        }
-
-        function resetKey() {
-            key = null;
-            updateVis();
-        }
-
-        function fillRow(data) {
-            for (i in data) {
-                const user = data[i];
-                const row = $("#tmp-row").clone();
-
-                row.attr("id", "user-row:" + user.Id);
-                row.find(".tmp-name").text(user.Name);
-                row.find(".tmp-mail").text(user.Email);
-                row.find(".tmp-icon").attr("src", identicon(user.Email))
-
-                row.find(".tmp-del").on("click", function (e) {
-                    var name = prompt("To delete user '" + user.Name + "', please type the name below")
-                    if (name) {
-                        if (name == user.Name) {
-                            deleteUser(user.Id);
-                        } else {
-                            alert("Wrong name, please try again")
-                        }
-                    }
-                    return false;
-                });
-
-                row.appendTo("#users-list");
-                setVis(row, true);
-            }
-        }
-
-        function _headers() { return { "Authorization": "Bearer " + key }; }
-
-        function loadUsers() {
-            $("#users-list").empty();
-            $.get({ url: "/admin/users", headers: _headers() })
-                .done(fillRow).fail(resetKey);
-
-            return false;
-        }
-
-        function _post(url, successMsg, errMsg, resetOnErr, data) {
-            $.post({ url: url, headers: _headers(), data: data })
-                .done(function () {
-                    alert(successMsg);
-                    loadUsers();
-                }).fail(function (e) {
-                    const r = e.responseJSON;
-                    const msg = r ? r.ErrorModel.Message : "Unknown error";
-                    alert(errMsg + ": " + msg);
-                    if (resetOnErr) { resetKey(); }
-                });
-        }
-
-        function deleteUser(id) {
-            _post("/admin/users/" + id + "/delete",
-                "User deleted correctly",
-                "Error deleting user", true);
-        }
-
-        function inviteUser() {
-            inv = $("#email-invite");
-            data = JSON.stringify({ "Email": inv.val() });
-            inv.val("");
-            _post("/admin/invite/", "User invited correctly",
-                "Error inviting user", false, data);
-            return false;
-        }
-
-        $(window).on('load', function () {
-            setKey();
-
-            $("#key-form").submit(setKey);
-            $("#reload-btn").click(loadUsers);
-            $("#invite-form").submit(inviteUser);
-        });
-    </script>
-</head>
-
-<body class="bg-light">
-    <nav class="navbar navbar-expand-md navbar-dark bg-dark fixed-top shadow">
-        <a class="navbar-brand" href="#">Bitwarden_rs</a>
-        <div class="navbar-collapse">
-            <ul class="navbar-nav">
-                <li class="nav-item active">
-                    <a class="nav-link" href="/admin">Admin Panel</a>
-                </li>
-                <li class="nav-item">
-                    <a class="nav-link" href="/">Vault</a>
-                </li>
-            </ul>
-        </div>
-    </nav>
-    <main class="container">
-        <div id="no-key-form" class="d-none align-items-center p-3 mb-3 text-white-50 bg-danger rounded shadow">
-            <div>
-                <h6 class="mb-0 text-white">Authentication key needed to continue</h6>
-                <small>Please provide it below:</small>
-
-                <form class="form-inline" id="key-form">
-                    <input type="password" class="form-control w-50 mr-2" id="key" placeholder="Enter admin key">
-                    <button type="submit" class="btn btn-primary">Save</button>
-                </form>
-            </div>
-        </div>
-
-        <div id="users-block" class="d-none my-3 p-3 bg-white rounded shadow">
-            <h6 class="border-bottom pb-2 mb-0">Registered Users</h6>
-
-            <div id="users-list"></div>
-
-            <small class="d-block text-right mt-3">
-                <a id="reload-btn" href="#">Reload users</a>
-            </small>
-        </div>
-
-        <div id="invite-form-block" class="d-none align-items-center p-3 mb-3 text-white-50 bg-secondary rounded shadow">
-            <div>
-                <h6 class="mb-0 text-white">Invite User</h6>
-                <small>Email:</small>
-
-                <form class="form-inline" id="invite-form">
-                    <input type="email" class="form-control w-50 mr-2" id="email-invite" placeholder="Enter email">
-                    <button type="submit" class="btn btn-primary">Invite</button>
-                </form>
-            </div>
-        </div>
-
-        <div id="tmp-row" class="d-none media pt-3">
-            <img class="mr-2 rounded tmp-icon">
-            <div class="media-body pb-3 mb-0 small border-bottom">
-                <div class="d-flex justify-content-between">
-                    <strong class="tmp-name">Full Name</strong>
-                    <a class="tmp-del mr-3" href="#">Delete User</a>
-                </div>
-                <span class="d-block tmp-mail">Email</span>
-            </div>
-        </div>
-    </main>
-</body>
-
-</html>

src/static/images/error-x.svg (new file, 6 lines, 241 B)
@@ -0,0 +1,6 @@
+<svg xmlns="http://www.w3.org/2000/svg" width="450" height="450" version="1">
+    <circle cx="225" cy="225" r="225" fill="#C33"/>
+    <g fill="#FFF" stroke="#FFF" stroke-width="70">
+        <path d="M107 110l236 237M107 347l236-237"/>
+    </g>
+</svg>

Some files were not shown because too many files have changed in this diff.