Compare commits

145 commits:

1e5306b820
6890c25ea1
48482fece0
1dc1d4df72
2b4dd6f137
3da44a8d30
34ea10475d
ced7f1771a
af2235bf88
305de2e2cd
8756c5c255
27609ac4cc
95d906bdbb
4bb0d7bc05
d9599155ae
244bad3a24
f7056bcaa5
994669fb69
3ab90259f2
155109dea1
b268c3dd1c
4e64dbdde4
a2955daffe
d3921b973b
cf6ad3cb15
90e0b7fec6
d77333576b
73ff8d79f7
95fc88ae5b
1d0eaac260
3565bfc939
a82c04910f
233f03ca2b
93c881a7a9
0af3956abd
15feff3e79
5c5700caa7
3bddc176d6
9caf4bf383
9b2234fa0e
1f79fdec4e
a56f4c97e4
3a3390963c
fd27759a95
01d8056c73
81fa33ebb5
e8aa3bc066
0bf0125e82
6209e778e5
5323283f98
57e17d0648
da55d5ec70
828a060698
3e5971b9db
47c2625d38
49af9cf4f5
6b1daeba05
9f1240d8d9
a8138be69b
ea57dc3bc9
131348a49f
b22564cb00
16eb0a56f9
3e4ff47a38
8ea01a67f6
aa5cc642e1
a121cb6f00
60164182ae
f842a80cdb
4b6a574ee0
f9ebb780f9
1fc6c30652
46a1a013cd
551810c486
b987ba506d
84810f2bb2
424d666a50
a71359f647
d93c344176
b9c3213b90
95e24ffc51
00d56d7295
7436b454db
8da5b99482
2969e87b52
ce62e898c3
431462d839
7d0e234b34
dad1b1bee9
9312cebee3
cdf5b6ec2d
ce99fc8f95
a75d050001
75cfd10f11
9859ba6339
513056f711
ebe334fcc7
0eec12472e
39106d440a
9117095764
099bba950c
e37ff60617
5b14608041
ad92692bab
d956d42903
d69be7d03a
f82de8d00d
c836f88ff2
8b660ae090
9323c57f49
85e3c73525
a74bc2e58f
0680638933
46d31ee5f7
e794b397d3
d41350050b
4cd5b06b7f
cd768439d2
9e5fd2d576
ecb46f591c
d62d53aa8e
2c515ab13c
83d556ff0c
678d313836
705d840ea3
7dff8c01dd
5860679624
4628e4519d
b884fd20a1
67c657003d
580c1bbc7d
2b6383d243
f27455a26f
1d4f900e48
c5ca588a6f
06888251e3
1a6e4cf4e4
9f86196a9d
1e31043fb3
85adcf1ae5
9abb4d2873
235ff44736
9c2d741749
37cc0c34cf
5633b6ac94
.gitignore
@@ -4,6 +4,8 @@ target
# Data folder
data
.env
.env.template
.gitattributes

# IDE files
.vscode
.editorconfig (new file, 23 lines)
@@ -0,0 +1,23 @@
# EditorConfig is awesome: https://EditorConfig.org

# top-most EditorConfig file
root = true

[*]
end_of_line = lf
charset = utf-8

[*.{rs,py}]
indent_style = space
indent_size = 4
trim_trailing_whitespace = true
insert_final_newline = true

[*.{yml,yaml}]
indent_style = space
indent_size = 2
trim_trailing_whitespace = true
insert_final_newline = true

[Makefile]
indent_style = tab
.env.template
@@ -1,4 +1,4 @@
## Bitwarden_RS Configuration File
## Vaultwarden Configuration File
## Uncomment any of the following lines to change the defaults
##
## Be aware that most of these settings will be overridden if they were changed
@@ -28,6 +28,7 @@
# RSA_KEY_FILENAME=data/rsa_key
# ICON_CACHE_FOLDER=data/icon_cache
# ATTACHMENTS_FOLDER=data/attachments
# SENDS_FOLDER=data/sends

## Templates data folder, by default uses embedded templates
## Check source code to see the format
@@ -35,9 +36,9 @@
## Automatically reload the templates for every request, slow, use only for development
# RELOAD_TEMPLATES=false

## Client IP Header, used to identify the IP of the client, defaults to "X-Client-IP"
## Client IP Header, used to identify the IP of the client, defaults to "X-Real-IP"
## Set to the string "none" (without quotes), to disable any headers and just use the remote IP
# IP_HEADER=X-Client-IP
# IP_HEADER=X-Real-IP

## Cache time-to-live for successfully obtained icons, in seconds (0 is "forever")
# ICON_CACHE_TTL=2592000
@@ -55,6 +56,23 @@
# WEBSOCKET_ADDRESS=0.0.0.0
# WEBSOCKET_PORT=3012

## Job scheduler settings
##
## Job schedules use a cron-like syntax (as parsed by https://crates.io/crates/cron),
## and are always in terms of UTC time (regardless of your local time zone settings).
##
## How often (in ms) the job scheduler thread checks for jobs that need running.
## Set to 0 to globally disable scheduled jobs.
# JOB_POLL_INTERVAL_MS=30000
##
## Cron schedule of the job that checks for Sends past their deletion date.
## Defaults to hourly (5 minutes after the hour). Set blank to disable this job.
# SEND_PURGE_SCHEDULE="0 5 * * * *"
##
## Cron schedule of the job that checks for trashed items to delete permanently.
## Defaults to daily (5 minutes after midnight). Set blank to disable this job.
# TRASH_PURGE_SCHEDULE="0 5 0 * * *"

## Enable extended logging, which shows timestamps and targets in the logs
# EXTENDED_LOGGING=true
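Note that the two schedules above use the `cron` crate's six-field syntax (seconds come first), not the classic five-field crontab format. A quick sketch of how the fields line up (illustrative only, not part of the diff):

```sh
# Six fields: sec  min  hour  day-of-month  month  day-of-week
#   "0 5 * * * *"  -> second 0 of minute 5 of every hour   (hourly Send purge)
#   "0 5 0 * * *"  -> 00:05:00 UTC every day               (daily trash purge)
```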
@@ -81,7 +99,7 @@
## Enable WAL for the DB
## Set to false to avoid enabling WAL during startup.
## Note that if the DB already has WAL enabled, you will also need to disable WAL in the DB,
## this setting only prevents bitwarden_rs from automatically enabling it on start.
## this setting only prevents vaultwarden from automatically enabling it on start.
## Please read project wiki page about this setting first before changing the value as it can
## cause performance degradation or might render the service unable to start.
# ENABLE_DB_WAL=true
@@ -169,7 +187,7 @@
## Invitations org admins to invite users, even when signups are disabled
# INVITATIONS_ALLOWED=true
## Name shown in the invitation emails that don't come from a specific organization
# INVITATION_ORG_NAME=Bitwarden_RS
# INVITATION_ORG_NAME=Vaultwarden

## Per-organization attachment limit (KB)
## Limit in kilobytes for an organization attachments, once the limit is exceeded it won't be possible to upload more
@@ -241,8 +259,8 @@
## To make sure the email links are pointing to the correct host, set the DOMAIN variable.
## Note: if SMTP_USERNAME is specified, SMTP_PASSWORD is mandatory
# SMTP_HOST=smtp.domain.tld
# SMTP_FROM=bitwarden-rs@domain.tld
# SMTP_FROM_NAME=Bitwarden_RS
# SMTP_FROM=vaultwarden@domain.tld
# SMTP_FROM_NAME=Vaultwarden
# SMTP_PORT=587 # Ports 587 (submission) and 25 (smtp) are standard without encryption and with encryption via STARTTLS (Explicit TLS). Port 465 is outdated and used with Implicit TLS.
# SMTP_SSL=true # (Explicit) - This variable by default configures Explicit STARTTLS, it will upgrade an insecure connection to a secure one. Unless SMTP_EXPLICIT_TLS is set to true. Either port 587 or 25 are default.
# SMTP_EXPLICIT_TLS=true # (Implicit) - N.B. This variable configures Implicit TLS. It's currently mislabelled (see bug #851) - SMTP_SSL Needs to be set to true for this option to work. Usually port 465 is used here.
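Taken together, the three SMTP comments above mean: STARTTLS (explicit TLS) pairs with port 587 or 25 and `SMTP_SSL=true`, while implicit TLS pairs with port 465 and both flags set. A minimal sketch of the implicit-TLS combination (the hostname is a placeholder):

```sh
# Implicit TLS on port 465; per the comments above, SMTP_SSL must stay true:
# SMTP_HOST=smtp.example.com
# SMTP_PORT=465
# SMTP_SSL=true
# SMTP_EXPLICIT_TLS=true
```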
.gitattributes (vendored, new file, 3 lines)
@@ -0,0 +1,3 @@
# Ignore vendored scripts in GitHub stats
src/static/scripts/* linguist-vendored
.github/ISSUE_TEMPLATE/bug_report.md (vendored, 61 lines changed)
@@ -1,51 +1,66 @@
---
name: Bug report
about: Create a report to help us improve
about: Use this ONLY for bugs in vaultwarden itself. Use the Discourse forum (link below) to request features or get help with usage/configuration. If in doubt, use the forum.
title: ''
labels: ''
assignees: ''

---
<!--
# ###
NOTE: Please update to the latest version of bitwarden_rs before reporting an issue!
This saves you and us a lot of time and troubleshooting.
See: https://github.com/dani-garcia/bitwarden_rs/issues/1180
# ###
# ###
NOTE: Please update to the latest version of vaultwarden before reporting an issue!
This saves you and us a lot of time and troubleshooting.
See:
* https://github.com/dani-garcia/vaultwarden/issues/1180
* https://github.com/dani-garcia/vaultwarden/wiki/Updating-the-vaultwarden-image
# ###
-->


<!--
Please fill out the following template to make solving your problem easier and faster for us.
This is only a guideline. If you think that parts are unnecessary for your issue, feel free to remove them.

Remember to hide/obfuscate personal and confidential information,
such as names, global IP/DNS addresses and especially passwords, if necessary.
Remember to hide/redact personal or confidential information,
such as passwords, IP addresses, and DNS names as appropriate.
-->

### Subject of the issue
<!-- Describe your issue here.-->
<!-- Describe your issue here. -->

### Your environment
<!-- The version number, obtained from the logs or the admin diagnostics page -->
<!-- Remember to check your issue on the latest version first! -->
* Bitwarden_rs version:
<!-- How the server was installed: Docker image / package / built from source -->
### Deployment environment

<!--
=========================================================================================
Preferably, use the `Generate Support String` button on the admin page's Diagnostics tab.
That will auto-generate most of the info requested in this section.
=========================================================================================
-->

<!-- The version number, obtained from the logs (at startup) or the admin diagnostics page -->
<!-- This is NOT the version number shown on the web vault, which is versioned separately from vaultwarden -->
<!-- Remember to check if your issue exists on the latest version first! -->
* vaultwarden version:

<!-- How the server was installed: Docker image, OS package, built from source, etc. -->
* Install method:
* Clients used: <!-- if applicable -->

* Clients used: <!-- web vault, desktop, Android, iOS, etc. (if applicable) -->

* Reverse proxy and version: <!-- if applicable -->
* Version of mysql/postgresql: <!-- if applicable -->
* Other relevant information:

* MySQL/MariaDB or PostgreSQL version: <!-- if applicable -->

* Other relevant details:

### Steps to reproduce
<!-- Tell us how to reproduce this issue. What parameters did you set (differently from the defaults)
and how did you start bitwarden_rs? -->
and how did you start vaultwarden? -->

### Expected behaviour
<!-- Tell us what should happen -->
<!-- Tell us what you expected to happen -->

### Actual behaviour
<!-- Tell us what happens instead -->
<!-- Tell us what actually happened -->

### Relevant logs
<!-- Share some logfiles, screenshots or output of relevant programs with us. -->
### Troubleshooting data
<!-- Share any log files, screenshots, or other relevant troubleshooting data -->
.github/ISSUE_TEMPLATE/config.yml (vendored, new file, 8 lines)
@@ -0,0 +1,8 @@
blank_issues_enabled: false
contact_links:
  - name: Discourse forum for bitwarden_rs
    url: https://bitwardenrs.discourse.group/
    about: Use this forum to request features or get help with usage/configuration.
  - name: GitHub Discussions for vaultwarden
    url: https://github.com/dani-garcia/vaultwarden/discussions
    about: An alternative to the Discourse forum, if this is easier for you.
.github/ISSUE_TEMPLATE/feature_request.md (vendored, deleted, 11 lines)
@@ -1,11 +0,0 @@
---
name: Feature request
about: Suggest an idea for this project
title: ''
labels: better for forum
assignees: ''

---

# Please submit all your feature requests to the forum
Link: https://bitwardenrs.discourse.group/c/feature-requests
@@ -1,11 +0,0 @@
---
name: Help with installation/configuration
about: Any questions about the setup of bitwarden_rs
title: ''
labels: better for forum
assignees: ''

---

# Please submit all your third party help requests to the forum
Link: https://bitwardenrs.discourse.group/c/help
@@ -1,11 +0,0 @@
---
name: Help with proxy/database/NAS setup
about: Any questions about third party software
title: ''
labels: better for forum
assignees: ''

---

# Please submit all your third party help requests to the forum
Link: https://bitwardenrs.discourse.group/c/third-party-help
.github/workflows/build.yml (vendored, 36 lines changed)
@@ -2,14 +2,21 @@ name: Build

on:
  push:
  pull_request:
    # Ignore when there are only changes done too one of these paths
    paths-ignore:
      - "**.md"
      - "**.txt"
      - ".dockerignore"
      - ".env.template"
      - ".gitattributes"
      - ".gitignore"
      - "azure-pipelines.yml"
      - "docker/**"
      - "hooks/**"
      - "tools/**"
      - ".github/FUNDING.yml"
      - ".github/ISSUE_TEMPLATE/**"

jobs:
  build:
@@ -82,10 +89,11 @@ jobs:
        with:
          profile: minimal
          target: ${{ matrix.target-triple }}
          components: clippy, rustfmt
      # End Uses the rust-toolchain file to determine version


      # Run cargo tests (In release mode to speed up cargo build afterwards)
      # Run cargo tests (In release mode to speed up future builds)
      - name: '`cargo test --release --features ${{ matrix.features }} --target ${{ matrix.target-triple }}`'
        uses: actions-rs/cargo@v1
        with:
@@ -94,6 +102,24 @@ jobs:
      # End Run cargo tests


      # Run cargo clippy (In release mode to speed up future builds)
      - name: '`cargo clippy --release --features ${{ matrix.features }} --target ${{ matrix.target-triple }}`'
        uses: actions-rs/cargo@v1
        with:
          command: clippy
          args: --release --features ${{ matrix.features }} --target ${{ matrix.target-triple }}
      # End Run cargo clippy


      # Run cargo fmt
      - name: '`cargo fmt`'
        uses: actions-rs/cargo@v1
        with:
          command: fmt
          args: --all -- --check
      # End Run cargo fmt


      # Build the binary
      - name: '`cargo build --release --features ${{ matrix.features }} --target ${{ matrix.target-triple }}`'
        uses: actions-rs/cargo@v1
@@ -107,8 +133,8 @@ jobs:
      - name: Upload artifact
        uses: actions/upload-artifact@v2
        with:
          name: bitwarden_rs-${{ matrix.target-triple }}${{ matrix.ext }}
          path: target/${{ matrix.target-triple }}/release/bitwarden_rs${{ matrix.ext }}
          name: vaultwarden-${{ matrix.target-triple }}${{ matrix.ext }}
          path: target/${{ matrix.target-triple }}/release/vaultwarden${{ matrix.ext }}
      # End Upload artifact to Github Actions


@@ -119,7 +145,7 @@ jobs:
      #   uses: Shopify/upload-to-release@1
      #   if: startsWith(github.ref, 'refs/tags/')
      #   with:
      #     name: bitwarden_rs-${{ matrix.target-triple }}${{ matrix.ext }}
      #     path: target/${{ matrix.target-triple }}/release/bitwarden_rs${{ matrix.ext }}
      #     name: vaultwarden-${{ matrix.target-triple }}${{ matrix.ext }}
      #     path: target/${{ matrix.target-triple }}/release/vaultwarden${{ matrix.ext }}
      #   repo-token: ${{ secrets.GITHUB_TOKEN }}
      # End Upload to github actions release
.github/workflows/hadolint.yml (vendored, 3 lines changed)
@@ -1,6 +1,7 @@
name: Hadolint

on:
  push:
  pull_request:
    # Ignore when there are only changes done too one of these paths
    paths:
@@ -24,7 +25,7 @@ jobs:
          sudo curl -L https://github.com/hadolint/hadolint/releases/download/v$HADOLINT_VERSION/hadolint-$(uname -s)-$(uname -m) -o /usr/local/bin/hadolint && \
          sudo chmod +x /usr/local/bin/hadolint
        env:
          HADOLINT_VERSION: 1.19.0
          HADOLINT_VERSION: 2.0.0
      # End Download hadolint

      # Test Dockerfiles
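For reference, the pinned linter can also be run locally against any of the generated Dockerfiles; the path below is just an example:

```sh
# Assumes hadolint 2.0.0 is on PATH, matching HADOLINT_VERSION above
hadolint docker/amd64/Dockerfile
```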
Cargo.lock (generated, 1441 lines changed)
Cargo.toml (69 lines changed)
@@ -1,10 +1,10 @@
[package]
name = "bitwarden_rs"
name = "vaultwarden"
version = "1.0.0"
authors = ["Daniel García <dani-garcia@users.noreply.github.com>"]
edition = "2018"

repository = "https://github.com/dani-garcia/bitwarden_rs"
repository = "https://github.com/dani-garcia/vaultwarden"
readme = "README.md"
license = "GPL-3.0-only"
publish = false
@@ -32,53 +32,55 @@ rocket = { version = "0.5.0-dev", features = ["tls"], default-features = false }
rocket_contrib = "0.5.0-dev"

# HTTP client
reqwest = { version = "0.10.10", features = ["blocking", "json"] }
reqwest = { version = "0.11.3", features = ["blocking", "json", "gzip", "brotli", "socks"] }

# multipart/form-data support
multipart = { version = "0.17.0", features = ["server"], default-features = false }
multipart = { version = "0.17.1", features = ["server"], default-features = false }

# WebSockets library
ws = { version = "0.10.0", package = "parity-ws" }

# MessagePack library
rmpv = "0.4.6"
rmpv = "0.4.7"

# Concurrent hashmap implementation
chashmap = "2.2.2"

# A generic serialization/deserialization framework
serde = "1.0.118"
serde_derive = "1.0.118"
serde_json = "1.0.60"
serde = { version = "1.0.125", features = ["derive"] }
serde_json = "1.0.64"

# Logging
log = "0.4.11"
log = "0.4.14"
fern = { version = "0.6.0", features = ["syslog-4"] }

# A safe, extensible ORM and Query builder
diesel = { version = "1.4.5", features = [ "chrono", "r2d2"] }
diesel = { version = "1.4.6", features = [ "chrono", "r2d2"] }
diesel_migrations = "1.4.0"

# Bundled SQLite
libsqlite3-sys = { version = "0.18.0", features = ["bundled"], optional = true }
libsqlite3-sys = { version = "0.20.1", features = ["bundled"], optional = true }

# Crypto-related libraries
rand = "0.7.3"
ring = "0.16.19"
rand = "0.8.3"
ring = "0.16.20"

# UUID generation
uuid = { version = "0.8.1", features = ["v4"] }
uuid = { version = "0.8.2", features = ["v4"] }

# Date and time libraries
chrono = "0.4.19"
chrono = { version = "0.4.19", features = ["serde"] }
chrono-tz = "0.5.3"
time = "0.2.23"
time = "0.2.26"

# Job scheduler
job_scheduler = "1.2.1"

# TOTP library
oath = "0.10.2"

# Data encoding library
data-encoding = "2.3.1"
data-encoding = "2.3.2"

# JWT library
jsonwebtoken = "7.2.0"
@@ -87,46 +89,48 @@ jsonwebtoken = "7.2.0"
u2f = "0.2.0"

# Yubico Library
yubico = { version = "0.9.1", features = ["online-tokio"], default-features = false }
yubico = { version = "0.10.0", features = ["online-tokio"], default-features = false }

# A `dotenv` implementation for Rust
dotenv = { version = "0.15.0", default-features = false }

# Lazy initialization
once_cell = "1.5.2"
once_cell = "1.7.2"

# Numerical libraries
num-traits = "0.2.14"
num-derive = "0.3.3"

# Email libraries
lettre = { version = "0.10.0-alpha.4", features = ["smtp-transport", "builder", "serde", "native-tls", "hostname", "tracing"], default-features = false }
newline-converter = "0.1.0"
tracing = { version = "0.1.25", features = ["log"] } # Needed to have lettre trace logging used when SMTP_DEBUG is enabled.
lettre = { version = "0.10.0-beta.3", features = ["smtp-transport", "builder", "serde", "native-tls", "hostname", "tracing"], default-features = false }
newline-converter = "0.2.0"

# Template library
handlebars = { version = "3.5.1", features = ["dir_source"] }
handlebars = { version = "3.5.4", features = ["dir_source"] }

# For favicon extraction from main website
soup = "0.5.0"
regex = "1.4.2"
html5ever = "0.25.1"
markup5ever_rcdom = "0.1.0"
regex = { version = "1.4.5", features = ["std", "perf"], default-features = false }
data-url = "0.1.0"

# Used by U2F, JWT and Postgres
openssl = "0.10.31"
openssl = "0.10.34"

# URL encoding library
percent-encoding = "2.1.0"
# Punycode conversion
idna = "0.2.0"
idna = "0.2.2"

# CLI argument parsing
structopt = "0.3.21"
pico-args = "0.4.0"

# Logging panics to logfile instead stderr only
backtrace = "0.3.55"
backtrace = "0.3.56"

# Macro ident concatenation
paste = "1.0.4"
paste = "1.0.5"

[patch.crates-io]
# Use newest ring
@@ -135,3 +139,10 @@ rocket_contrib = { git = 'https://github.com/SergioBenitez/Rocket', rev = '263e3

# For favicon extraction from main website
data-url = { git = 'https://github.com/servo/rust-url', package="data-url", rev = '540ede02d0771824c0c80ff9f57fe8eff38b1291' }

# The maintainer of the `job_scheduler` crate doesn't seem to have responded
# to any issues or PRs for almost a year (as of April 2021). This hopefully
# temporary fork updates Cargo.toml to use more up-to-date dependencies.
# In particular, `cron` has since implemented parsing of some common syntax
# that wasn't previously supported (https://github.com/zslayton/cron/pull/64).
job_scheduler = { git = 'https://github.com/jjlin/job_scheduler', rev = 'ee023418dbba2bfe1e30a5fd7d937f9e33739806' }
README.md (61 lines changed)
@@ -1,15 +1,14 @@
### This is a Bitwarden server API implementation written in Rust compatible with [upstream Bitwarden clients](https://bitwarden.com/#download)*, perfect for self-hosted deployment where running the official resource-heavy service might not be ideal.
### Alternative implementation of the Bitwarden server API written in Rust and compatible with [upstream Bitwarden clients](https://bitwarden.com/#download)*, perfect for self-hosted deployment where running the official resource-heavy service might not be ideal.

---

[](https://travis-ci.org/dani-garcia/bitwarden_rs)
[](https://hub.docker.com/r/bitwardenrs/server)
[](https://deps.rs/repo/github/dani-garcia/bitwarden_rs)
[](https://github.com/dani-garcia/bitwarden_rs/releases/latest)
[](https://github.com/dani-garcia/bitwarden_rs/blob/master/LICENSE.txt)
[](https://matrix.to/#/#bitwarden_rs:matrix.org)
[](https://hub.docker.com/r/vaultwarden/server)
[](https://deps.rs/repo/github/dani-garcia/vaultwarden)
[](https://github.com/dani-garcia/vaultwarden/releases/latest)
[](https://github.com/dani-garcia/vaultwarden/blob/master/LICENSE.txt)
[](https://matrix.to/#/#vaultwarden:matrix.org)

Image is based on [Rust implementation of Bitwarden API](https://github.com/dani-garcia/bitwarden_rs).
Image is based on [Rust implementation of Bitwarden API](https://github.com/dani-garcia/vaultwarden).

**This project is not associated with the [Bitwarden](https://bitwarden.com/) project nor 8bit Solutions LLC.**

@@ -33,29 +32,57 @@ Basically full implementation of Bitwarden API is provided including:
Pull the docker image and mount a volume from the host for persistent storage:

```sh
docker pull bitwardenrs/server:latest
docker run -d --name bitwarden -v /bw-data/:/data/ -p 80:80 bitwardenrs/server:latest
docker pull vaultwarden/server:latest
docker run -d --name vaultwarden -v /vw-data/:/data/ -p 80:80 vaultwarden/server:latest
```
This will preserve any persistent data under /bw-data/, you can adapt the path to whatever suits you.

**IMPORTANT**: Some web browsers, like Chrome, disallow the use of Web Crypto APIs in insecure contexts. In this case, you might get an error like `Cannot read property 'importKey'`. To solve this problem, you need to access the web vault from HTTPS.

This can be configured in [bitwarden_rs directly](https://github.com/dani-garcia/bitwarden_rs/wiki/Enabling-HTTPS) or using a third-party reverse proxy ([some examples](https://github.com/dani-garcia/bitwarden_rs/wiki/Proxy-examples)).
This can be configured in [vaultwarden directly](https://github.com/dani-garcia/vaultwarden/wiki/Enabling-HTTPS) or using a third-party reverse proxy ([some examples](https://github.com/dani-garcia/vaultwarden/wiki/Proxy-examples)).

If you have an available domain name, you can get HTTPS certificates with [Let's Encrypt](https://letsencrypt.org/), or you can generate self-signed certificates with utilities like [mkcert](https://github.com/FiloSottile/mkcert). Some proxies automatically do this step, like Caddy (see examples linked above).

## Usage
See the [bitwarden_rs wiki](https://github.com/dani-garcia/bitwarden_rs/wiki) for more information on how to configure and run the bitwarden_rs server.
See the [vaultwarden wiki](https://github.com/dani-garcia/vaultwarden/wiki) for more information on how to configure and run the vaultwarden server.

## Get in touch
To ask a question, offer suggestions or new features or to get help configuring or installing the software, please [use the forum](https://bitwardenrs.discourse.group/).
To ask a question, offer suggestions or new features or to get help configuring or installing the software, please [use the forum](https://vaultwarden.discourse.group/).

If you spot any bugs or crashes with bitwarden_rs itself, please [create an issue](https://github.com/dani-garcia/bitwarden_rs/issues/). Make sure there aren't any similar issues open, though!
If you spot any bugs or crashes with vaultwarden itself, please [create an issue](https://github.com/dani-garcia/vaultwarden/issues/). Make sure there aren't any similar issues open, though!

If you prefer to chat, we're usually hanging around at [#bitwarden_rs:matrix.org](https://matrix.to/#/#bitwarden_rs:matrix.org) room on Matrix. Feel free to join us!
If you prefer to chat, we're usually hanging around at [#vaultwarden:matrix.org](https://matrix.to/#/#vaultwarden:matrix.org) room on Matrix. Feel free to join us!

### Sponsors
Thanks for your contribution to the project!

- [@ChonoN](https://github.com/ChonoN)
- [@themightychris](https://github.com/themightychris)
<table>
  <tr>
    <td align="center">
      <a href="https://github.com/netdadaltd">
        <img src="https://avatars.githubusercontent.com/u/77323954?s=75&v=4" width="75px;" alt="netdadaltd"/>
        <br />
        <sub><b>netDada Ltd.</b></sub>
      </a>
    </td>
  </tr>
</table>

<br/>

<table>
  <tr>
    <td align="center">
      <a href="https://github.com/ChonoN" style="width: 75px">
        <sub><b>ChonoN</b></sub>
      </a>
    </td>
  </tr>
  <tr>
    <td align="center">
      <a href="https://github.com/themightychris">
        <sub><b>themightychris</b></sub>
      </a>
    </td>
  </tr>
</table>
build.rs (14 lines changed)
@@ -1,7 +1,7 @@
use std::process::Command;
use std::env;
use std::process::Command;

fn main() {
fn main() {
    // This allow using #[cfg(sqlite)] instead of #[cfg(feature = "sqlite")], which helps when trying to add them through macros
    #[cfg(feature = "sqlite")]
    println!("cargo:rustc-cfg=sqlite");
@@ -11,8 +11,10 @@ fn main() {
    println!("cargo:rustc-cfg=postgresql");

    #[cfg(not(any(feature = "sqlite", feature = "mysql", feature = "postgresql")))]
    compile_error!("You need to enable one DB backend. To build with previous defaults do: cargo build --features sqlite");

    compile_error!(
        "You need to enable one DB backend. To build with previous defaults do: cargo build --features sqlite"
    );

    if let Ok(version) = env::var("BWRS_VERSION") {
        println!("cargo:rustc-env=BWRS_VERSION={}", version);
        println!("cargo:rustc-env=CARGO_PKG_VERSION={}", version);
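Since `BWRS_VERSION` is read from the environment at build time, the version baked into the binary can be overridden without touching git metadata. A hypothetical invocation (the version string is a placeholder):

```sh
# Illustrative: stamp a custom version into both BWRS_VERSION and CARGO_PKG_VERSION
BWRS_VERSION="1.21.0-custom" cargo build --release --features sqlite
```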
@@ -56,12 +58,12 @@ fn read_git_info() -> Result<(), std::io::Error> {
    // Combined version
    let version = if let Some(exact) = exact_tag {
        exact
    } else if &branch != "master" {
    } else if &branch != "main" && &branch != "master" {
        format!("{}-{} ({})", last_tag, rev_short, branch)
    } else {
        format!("{}-{}", last_tag, rev_short)
    };


    println!("cargo:rustc-env=BWRS_VERSION={}", version);
    println!("cargo:rustc-env=CARGO_PKG_VERSION={}", version);
docker/Dockerfile.buildx (new file, 33 lines)
@@ -0,0 +1,33 @@
# The cross-built images have the build arch (`amd64`) embedded in the image
# manifest, rather than the target arch. For example:
#
# $ docker inspect vaultwarden/server:latest-armv7 | jq -r '.[]|.Architecture'
# amd64
#
# Recent versions of Docker have started printing a warning when the image's
# claimed arch doesn't match the host arch. For example:
#
# WARNING: The requested image's platform (linux/amd64) does not match the
# detected host platform (linux/arm/v7) and no specific platform was requested
#
# The image still works fine, but the spurious warning creates confusion.
#
# Docker doesn't seem to provide a way to directly set the arch of an image
# at build time. To resolve the build vs. target arch discrepancy, we use
# Docker Buildx to build a new set of images with the correct target arch.
#
# Docker Buildx uses this Dockerfile to build an image for each requested
# platform. Since the Dockerfile basically consists of a single `FROM`
# instruction, we're effectively telling Buildx to build a platform-specific
# image by simply copying the existing cross-built image and setting the
# correct target arch as a side effect.
#
# References:
#
# - https://docs.docker.com/buildx/working-with-buildx/#build-multi-platform-images
# - https://docs.docker.com/engine/reference/builder/#automatic-platform-args-in-the-global-scope
# - https://docs.docker.com/engine/reference/builder/#understand-how-arg-and-from-interact
#
ARG LOCAL_REPO
ARG DOCKER_TAG
FROM ${LOCAL_REPO}:${DOCKER_TAG}-${TARGETARCH}${TARGETVARIANT}
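`TARGETARCH` and `TARGETVARIANT` are automatic platform args that Buildx derives from `--platform`, so the single `FROM` above re-tags the cross-built image per platform. A sketch of how this Dockerfile might be invoked (repo name and tag are placeholders; the project's actual hook scripts may differ):

```sh
# Re-wrap the cross-built armv7 image with the correct target arch metadata
docker buildx build \
  --platform linux/arm/v7 \
  --build-arg LOCAL_REPO=vaultwarden/server \
  --build-arg DOCKER_TAG=latest \
  --tag vaultwarden/server:latest-armv7 \
  --file docker/Dockerfile.buildx .
```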
|
@@ -1,30 +1,30 @@
|
||||
# This file was generated using a Jinja2 template.
|
||||
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfile's.
|
||||
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.
|
||||
|
||||
{% set build_stage_base_image = "rust:1.48" %}
|
||||
{% set build_stage_base_image = "rust:1.51" %}
|
||||
{% if "alpine" in target_file %}
|
||||
{% if "amd64" in target_file %}
|
||||
{% set build_stage_base_image = "clux/muslrust:nightly-2020-11-22" %}
|
||||
{% set runtime_stage_base_image = "alpine:3.12" %}
|
||||
{% set build_stage_base_image = "clux/muslrust:nightly-2021-04-14" %}
|
||||
{% set runtime_stage_base_image = "alpine:3.13" %}
|
||||
{% set package_arch_target = "x86_64-unknown-linux-musl" %}
|
||||
{% elif "arm32v7" in target_file %}
|
||||
{% elif "armv7" in target_file %}
|
||||
{% set build_stage_base_image = "messense/rust-musl-cross:armv7-musleabihf" %}
|
||||
{% set runtime_stage_base_image = "balenalib/armv7hf-alpine:3.12" %}
|
||||
{% set runtime_stage_base_image = "balenalib/armv7hf-alpine:3.13" %}
|
||||
{% set package_arch_target = "armv7-unknown-linux-musleabihf" %}
|
||||
{% endif %}
|
||||
{% elif "amd64" in target_file %}
|
||||
{% set runtime_stage_base_image = "debian:buster-slim" %}
|
||||
{% elif "arm64v8" in target_file %}
|
||||
{% elif "arm64" in target_file %}
|
||||
{% set runtime_stage_base_image = "balenalib/aarch64-debian:buster" %}
|
||||
{% set package_arch_name = "arm64" %}
|
||||
{% set package_arch_target = "aarch64-unknown-linux-gnu" %}
|
||||
{% set package_cross_compiler = "aarch64-linux-gnu" %}
|
||||
{% elif "arm32v6" in target_file %}
|
||||
{% elif "armv6" in target_file %}
|
||||
{% set runtime_stage_base_image = "balenalib/rpi-debian:buster" %}
|
||||
{% set package_arch_name = "armel" %}
|
||||
{% set package_arch_target = "arm-unknown-linux-gnueabi" %}
|
||||
{% set package_cross_compiler = "arm-linux-gnueabi" %}
|
||||
{% elif "arm32v7" in target_file %}
|
||||
{% elif "armv7" in target_file %}
|
||||
{% set runtime_stage_base_image = "balenalib/armv7hf-debian:buster" %}
|
||||
{% set package_arch_name = "armhf" %}
|
||||
{% set package_arch_target = "armv7-unknown-linux-gnueabihf" %}
|
||||
@@ -44,19 +44,26 @@
|
||||
# https://docs.docker.com/develop/develop-images/multistage-build/
|
||||
# https://whitfin.io/speeding-up-rust-docker-builds/
|
||||
####################### VAULT BUILD IMAGE #######################
|
||||
{% set vault_image_hash = "sha256:dcb7884dc5845b3842ff2204fe77482000b771495c6c359297ec3c03330d65e0" %}
|
||||
{% raw %}
|
||||
# This hash is extracted from the docker web-vault builds and it's preferred over a simple tag because it's immutable.
|
||||
# It can be viewed in multiple ways:
|
||||
# - From the https://hub.docker.com/repository/docker/bitwardenrs/web-vault/tags page, click the tag name and the digest should be there.
|
||||
# - From the console, with the following commands:
|
||||
# docker pull bitwardenrs/web-vault:v2.17.1
|
||||
# docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.17.1
|
||||
{% set vault_version = "2.19.0d" %}
|
||||
{% set vault_image_digest = "sha256:a7bd6bc4db33bd45f723c4b1ac90918b7f80204560683cfc8efd9efd03a9b233" %}
|
||||
# The web-vault digest specifies a particular web-vault build on Docker Hub.
|
||||
# Using the digest instead of the tag name provides better security,
|
||||
# as the digest of an image is immutable, whereas a tag name can later
|
||||
# be changed to point to a malicious image.
|
||||
#
|
||||
# - To do the opposite, and get the tag from the hash, you can do:
|
||||
# docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:dcb7884dc5845b3842ff2204fe77482000b771495c6c359297ec3c03330d65e0
|
||||
{% endraw %}
|
||||
FROM bitwardenrs/web-vault@{{ vault_image_hash }} as vault
|
||||
# To verify the current digest for a given tag name:
|
||||
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
|
||||
# click the tag name to view the digest of the image it currently points to.
|
||||
# - From the command line:
|
||||
# $ docker pull vaultwarden/web-vault:v{{ vault_version }}
|
||||
# $ docker image inspect --format "{{ '{{' }}.RepoDigests}}" vaultwarden/web-vault:v{{ vault_version }}
|
||||
# [vaultwarden/web-vault@{{ vault_image_digest }}]
|
||||
#
|
||||
# - Conversely, to get the tag name from the digest:
|
||||
# $ docker image inspect --format "{{ '{{' }}.RepoTags}}" vaultwarden/web-vault@{{ vault_image_digest }}
|
||||
# [vaultwarden/web-vault:v{{ vault_version }}]
|
||||
#
|
||||
FROM vaultwarden/web-vault@{{ vault_image_digest }} as vault
|
||||
|
||||
########################## BUILD IMAGE ##########################
|
||||
FROM {{ build_stage_base_image }} as build
|
||||
@@ -86,6 +93,9 @@ RUN rustup set profile minimal
|
||||
{% if "alpine" in target_file %}
|
||||
ENV USER "root"
|
||||
ENV RUSTFLAGS='-C link-arg=-s'
|
||||
{% if "armv7" in target_file %}
|
||||
ENV CFLAGS_armv7_unknown_linux_musleabihf="-mfpu=vfpv3-d16"
|
||||
{% endif %}
|
||||
{% elif "arm" in target_file %}
|
||||
# Install required build libs for {{ package_arch_name }} architecture.
|
||||
# To compile both mysql and postgresql we need some extra packages for both host arch and target arch
|
||||
@@ -178,8 +188,8 @@ RUN touch src/main.rs
|
||||
# your actual source files being built
|
||||
RUN cargo build --features ${DB} --release{{ package_arch_target_param }}
|
||||
{% if "alpine" in target_file %}
|
||||
{% if "arm32v7" in target_file %}
|
||||
RUN musl-strip target/{{ package_arch_target }}/release/bitwarden_rs
|
||||
{% if "armv7" in target_file %}
|
||||
RUN musl-strip target/{{ package_arch_target }}/release/vaultwarden
|
||||
{% endif %}
|
||||
{% endif %}
|
||||
|
||||
@@ -204,9 +214,7 @@ RUN [ "cross-build-start" ]
|
||||
RUN apk add --no-cache \
|
||||
openssl \
|
||||
curl \
|
||||
{% if "sqlite" in features %}
|
||||
sqlite \
|
||||
{% endif %}
|
||||
dumb-init \
|
||||
{% if "mysql" in features %}
|
||||
mariadb-connector-c \
|
||||
{% endif %}
|
||||
@@ -220,14 +228,11 @@ RUN apt-get update && apt-get install -y \
|
||||
openssl \
|
||||
ca-certificates \
|
||||
curl \
|
||||
sqlite3 \
|
||||
dumb-init \
|
||||
libmariadb-dev-compat \
|
||||
libpq5 \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
{% endif %}
|
||||
{% if "alpine" in target_file and "arm32v7" in target_file %}
|
||||
RUN apk add --no-cache -X http://dl-cdn.alpinelinux.org/alpine/edge/community catatonit
|
||||
{% endif %}
|
||||
|
||||
RUN mkdir /data
|
||||
{% if "amd64" not in target_file %}
|
||||
@@ -241,12 +246,13 @@ EXPOSE 3012
|
||||
|
||||
# Copies the files from the context (Rocket.toml file and web-vault)
|
||||
# and the binary from the "build" stage to the current stage
|
||||
WORKDIR /
|
||||
COPY Rocket.toml .
|
||||
COPY --from=vault /web-vault ./web-vault
|
||||
{% if package_arch_target is defined %}
|
||||
COPY --from=build /app/target/{{ package_arch_target }}/release/bitwarden_rs .
|
||||
COPY --from=build /app/target/{{ package_arch_target }}/release/vaultwarden .
|
||||
{% else %}
|
||||
COPY --from=build /app/target/release/bitwarden_rs .
|
||||
COPY --from=build /app/target/release/vaultwarden .
|
||||
{% endif %}
|
||||
|
||||
COPY docker/healthcheck.sh /healthcheck.sh
|
||||
@@ -255,9 +261,5 @@ COPY docker/start.sh /start.sh
|
||||
HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]
|
||||
|
||||
# Configures the startup!
|
||||
WORKDIR /
|
||||
{% if "alpine" in target_file and "arm32v7" in target_file %}
|
||||
CMD ["catatonit", "/start.sh"]
|
||||
{% else %}
|
||||
ENTRYPOINT ["/usr/bin/dumb-init", "--"]
|
||||
CMD ["/start.sh"]
|
||||
{% endif %}
|
||||
|
docker/Makefile
@@ -1,4 +1,4 @@
OBJECTS := $(shell find -mindepth 2 -name 'Dockerfile*')
OBJECTS := $(shell find ./ -mindepth 2 -name 'Dockerfile*')

all: $(OBJECTS)
docker/amd64/Dockerfile
@@ -1,24 +1,31 @@
# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfile's.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.

# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################

# This hash is extracted from the docker web-vault builds and it's preferred over a simple tag because it's immutable.
# It can be viewed in multiple ways:
# - From the https://hub.docker.com/repository/docker/bitwardenrs/web-vault/tags page, click the tag name and the digest should be there.
# - From the console, with the following commands:
# docker pull bitwardenrs/web-vault:v2.17.1
# docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.17.1
# The web-vault digest specifies a particular web-vault build on Docker Hub.
# Using the digest instead of the tag name provides better security,
# as the digest of an image is immutable, whereas a tag name can later
# be changed to point to a malicious image.
#
# - To do the opposite, and get the tag from the hash, you can do:
# docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:dcb7884dc5845b3842ff2204fe77482000b771495c6c359297ec3c03330d65e0
FROM bitwardenrs/web-vault@sha256:dcb7884dc5845b3842ff2204fe77482000b771495c6c359297ec3c03330d65e0 as vault
# To verify the current digest for a given tag name:
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
# click the tag name to view the digest of the image it currently points to.
# - From the command line:
# $ docker pull vaultwarden/web-vault:v2.19.0d
# $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2.19.0d
# [vaultwarden/web-vault@sha256:a7bd6bc4db33bd45f723c4b1ac90918b7f80204560683cfc8efd9efd03a9b233]
#
# - Conversely, to get the tag name from the digest:
# $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:a7bd6bc4db33bd45f723c4b1ac90918b7f80204560683cfc8efd9efd03a9b233
# [vaultwarden/web-vault:v2.19.0d]
#
FROM vaultwarden/web-vault@sha256:a7bd6bc4db33bd45f723c4b1ac90918b7f80204560683cfc8efd9efd03a9b233 as vault

########################## BUILD IMAGE ##########################
FROM rust:1.48 as build
FROM rust:1.51 as build

# Debian-based builds support multidb
ARG DB=sqlite,mysql,postgresql
@@ -78,7 +85,7 @@ RUN apt-get update && apt-get install -y \
    openssl \
    ca-certificates \
    curl \
    sqlite3 \
    dumb-init \
    libmariadb-dev-compat \
    libpq5 \
    && rm -rf /var/lib/apt/lists/*
@@ -90,9 +97,10 @@ EXPOSE 3012

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
WORKDIR /
COPY Rocket.toml .
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/release/bitwarden_rs .
COPY --from=build /app/target/release/vaultwarden .

COPY docker/healthcheck.sh /healthcheck.sh
COPY docker/start.sh /start.sh
@@ -100,6 +108,5 @@ COPY docker/start.sh /start.sh
HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

# Configures the startup!
WORKDIR /
ENTRYPOINT ["/usr/bin/dumb-init", "--"]
CMD ["/start.sh"]
docker/amd64/Dockerfile.alpine
@@ -1,24 +1,31 @@
# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfile's.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.

# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################

# This hash is extracted from the docker web-vault builds and it's preferred over a simple tag because it's immutable.
# It can be viewed in multiple ways:
# - From the https://hub.docker.com/repository/docker/bitwardenrs/web-vault/tags page, click the tag name and the digest should be there.
# - From the console, with the following commands:
# docker pull bitwardenrs/web-vault:v2.17.1
# docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.17.1
# The web-vault digest specifies a particular web-vault build on Docker Hub.
# Using the digest instead of the tag name provides better security,
# as the digest of an image is immutable, whereas a tag name can later
# be changed to point to a malicious image.
#
# - To do the opposite, and get the tag from the hash, you can do:
# docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:dcb7884dc5845b3842ff2204fe77482000b771495c6c359297ec3c03330d65e0
FROM bitwardenrs/web-vault@sha256:dcb7884dc5845b3842ff2204fe77482000b771495c6c359297ec3c03330d65e0 as vault
# To verify the current digest for a given tag name:
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
# click the tag name to view the digest of the image it currently points to.
# - From the command line:
# $ docker pull vaultwarden/web-vault:v2.19.0d
# $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2.19.0d
# [vaultwarden/web-vault@sha256:a7bd6bc4db33bd45f723c4b1ac90918b7f80204560683cfc8efd9efd03a9b233]
#
# - Conversely, to get the tag name from the digest:
# $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:a7bd6bc4db33bd45f723c4b1ac90918b7f80204560683cfc8efd9efd03a9b233
# [vaultwarden/web-vault:v2.19.0d]
#
FROM vaultwarden/web-vault@sha256:a7bd6bc4db33bd45f723c4b1ac90918b7f80204560683cfc8efd9efd03a9b233 as vault

########################## BUILD IMAGE ##########################
FROM clux/muslrust:nightly-2020-11-22 as build
FROM clux/muslrust:nightly-2021-04-14 as build

# Alpine-based AMD64 (musl) does not support mysql/mariadb during compile time.
ARG DB=sqlite,postgresql
@@ -63,7 +70,7 @@ RUN cargo build --features ${DB} --release --target=x86_64-unknown-linux-musl
######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM alpine:3.12
FROM alpine:3.13

ENV ROCKET_ENV "staging"
ENV ROCKET_PORT=80
@@ -74,7 +81,7 @@ ENV SSL_CERT_DIR=/etc/ssl/certs
RUN apk add --no-cache \
    openssl \
    curl \
    sqlite \
    dumb-init \
    postgresql-libs \
    ca-certificates

@@ -85,9 +92,10 @@ EXPOSE 3012

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
WORKDIR /
COPY Rocket.toml .
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/x86_64-unknown-linux-musl/release/bitwarden_rs .
COPY --from=build /app/target/x86_64-unknown-linux-musl/release/vaultwarden .

COPY docker/healthcheck.sh /healthcheck.sh
COPY docker/start.sh /start.sh
@@ -95,6 +103,5 @@ COPY docker/start.sh /start.sh
HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

# Configures the startup!
WORKDIR /
ENTRYPOINT ["/usr/bin/dumb-init", "--"]
CMD ["/start.sh"]
docker/arm64/Dockerfile
@@ -1,24 +1,31 @@
# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfile's.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.

# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################

# This hash is extracted from the docker web-vault builds and it's preferred over a simple tag because it's immutable.
# It can be viewed in multiple ways:
# - From the https://hub.docker.com/repository/docker/bitwardenrs/web-vault/tags page, click the tag name and the digest should be there.
# - From the console, with the following commands:
# docker pull bitwardenrs/web-vault:v2.17.1
# docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.17.1
# The web-vault digest specifies a particular web-vault build on Docker Hub.
# Using the digest instead of the tag name provides better security,
# as the digest of an image is immutable, whereas a tag name can later
# be changed to point to a malicious image.
#
# - To do the opposite, and get the tag from the hash, you can do:
# docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:dcb7884dc5845b3842ff2204fe77482000b771495c6c359297ec3c03330d65e0
FROM bitwardenrs/web-vault@sha256:dcb7884dc5845b3842ff2204fe77482000b771495c6c359297ec3c03330d65e0 as vault
# To verify the current digest for a given tag name:
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
# click the tag name to view the digest of the image it currently points to.
# - From the command line:
# $ docker pull vaultwarden/web-vault:v2.19.0d
# $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2.19.0d
# [vaultwarden/web-vault@sha256:a7bd6bc4db33bd45f723c4b1ac90918b7f80204560683cfc8efd9efd03a9b233]
#
# - Conversely, to get the tag name from the digest:
# $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:a7bd6bc4db33bd45f723c4b1ac90918b7f80204560683cfc8efd9efd03a9b233
# [vaultwarden/web-vault:v2.19.0d]
#
FROM vaultwarden/web-vault@sha256:a7bd6bc4db33bd45f723c4b1ac90918b7f80204560683cfc8efd9efd03a9b233 as vault

########################## BUILD IMAGE ##########################
FROM rust:1.48 as build
FROM rust:1.51 as build

# Debian-based builds support multidb
ARG DB=sqlite,mysql,postgresql
@@ -121,7 +128,7 @@ RUN apt-get update && apt-get install -y \
    openssl \
    ca-certificates \
    curl \
    sqlite3 \
    dumb-init \
    libmariadb-dev-compat \
    libpq5 \
    && rm -rf /var/lib/apt/lists/*
@@ -136,9 +143,10 @@ EXPOSE 3012

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
WORKDIR /
COPY Rocket.toml .
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/aarch64-unknown-linux-gnu/release/bitwarden_rs .
COPY --from=build /app/target/aarch64-unknown-linux-gnu/release/vaultwarden .

COPY docker/healthcheck.sh /healthcheck.sh
COPY docker/start.sh /start.sh
@@ -146,6 +154,5 @@ COPY docker/start.sh /start.sh
HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

# Configures the startup!
WORKDIR /
ENTRYPOINT ["/usr/bin/dumb-init", "--"]
CMD ["/start.sh"]
docker/armv6/Dockerfile
@@ -1,24 +1,31 @@
# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfile's.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.

# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################

# This hash is extracted from the docker web-vault builds and it's preferred over a simple tag because it's immutable.
# It can be viewed in multiple ways:
# - From the https://hub.docker.com/repository/docker/bitwardenrs/web-vault/tags page, click the tag name and the digest should be there.
# - From the console, with the following commands:
# docker pull bitwardenrs/web-vault:v2.17.1
# docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.17.1
# The web-vault digest specifies a particular web-vault build on Docker Hub.
# Using the digest instead of the tag name provides better security,
# as the digest of an image is immutable, whereas a tag name can later
# be changed to point to a malicious image.
#
# - To do the opposite, and get the tag from the hash, you can do:
# docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:dcb7884dc5845b3842ff2204fe77482000b771495c6c359297ec3c03330d65e0
FROM bitwardenrs/web-vault@sha256:dcb7884dc5845b3842ff2204fe77482000b771495c6c359297ec3c03330d65e0 as vault
# To verify the current digest for a given tag name:
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
# click the tag name to view the digest of the image it currently points to.
# - From the command line:
# $ docker pull vaultwarden/web-vault:v2.19.0d
# $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2.19.0d
# [vaultwarden/web-vault@sha256:a7bd6bc4db33bd45f723c4b1ac90918b7f80204560683cfc8efd9efd03a9b233]
#
# - Conversely, to get the tag name from the digest:
# $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:a7bd6bc4db33bd45f723c4b1ac90918b7f80204560683cfc8efd9efd03a9b233
# [vaultwarden/web-vault:v2.19.0d]
#
FROM vaultwarden/web-vault@sha256:a7bd6bc4db33bd45f723c4b1ac90918b7f80204560683cfc8efd9efd03a9b233 as vault

########################## BUILD IMAGE ##########################
FROM rust:1.48 as build
FROM rust:1.51 as build

# Debian-based builds support multidb
ARG DB=sqlite,mysql,postgresql
@@ -121,7 +128,7 @@ RUN apt-get update && apt-get install -y \
    openssl \
    ca-certificates \
    curl \
    sqlite3 \
    dumb-init \
    libmariadb-dev-compat \
    libpq5 \
    && rm -rf /var/lib/apt/lists/*
@@ -136,9 +143,10 @@ EXPOSE 3012

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
WORKDIR /
COPY Rocket.toml .
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/arm-unknown-linux-gnueabi/release/bitwarden_rs .
COPY --from=build /app/target/arm-unknown-linux-gnueabi/release/vaultwarden .

COPY docker/healthcheck.sh /healthcheck.sh
COPY docker/start.sh /start.sh
@@ -146,6 +154,5 @@ COPY docker/start.sh /start.sh
HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

# Configures the startup!
WORKDIR /
ENTRYPOINT ["/usr/bin/dumb-init", "--"]
CMD ["/start.sh"]
docker/armv7/Dockerfile
@@ -1,24 +1,31 @@
# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfile's.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.

# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################

# This hash is extracted from the docker web-vault builds and it's preferred over a simple tag because it's immutable.
# It can be viewed in multiple ways:
# - From the https://hub.docker.com/repository/docker/bitwardenrs/web-vault/tags page, click the tag name and the digest should be there.
# - From the console, with the following commands:
# docker pull bitwardenrs/web-vault:v2.17.1
# docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.17.1
# The web-vault digest specifies a particular web-vault build on Docker Hub.
# Using the digest instead of the tag name provides better security,
# as the digest of an image is immutable, whereas a tag name can later
# be changed to point to a malicious image.
#
# - To do the opposite, and get the tag from the hash, you can do:
# docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:dcb7884dc5845b3842ff2204fe77482000b771495c6c359297ec3c03330d65e0
FROM bitwardenrs/web-vault@sha256:dcb7884dc5845b3842ff2204fe77482000b771495c6c359297ec3c03330d65e0 as vault
# To verify the current digest for a given tag name:
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
# click the tag name to view the digest of the image it currently points to.
# - From the command line:
# $ docker pull vaultwarden/web-vault:v2.19.0d
# $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2.19.0d
# [vaultwarden/web-vault@sha256:a7bd6bc4db33bd45f723c4b1ac90918b7f80204560683cfc8efd9efd03a9b233]
#
# - Conversely, to get the tag name from the digest:
# $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:a7bd6bc4db33bd45f723c4b1ac90918b7f80204560683cfc8efd9efd03a9b233
# [vaultwarden/web-vault:v2.19.0d]
#
FROM vaultwarden/web-vault@sha256:a7bd6bc4db33bd45f723c4b1ac90918b7f80204560683cfc8efd9efd03a9b233 as vault

########################## BUILD IMAGE ##########################
FROM rust:1.48 as build
FROM rust:1.51 as build

# Debian-based builds support multidb
ARG DB=sqlite,mysql,postgresql
@@ -121,7 +128,7 @@ RUN apt-get update && apt-get install -y \
    openssl \
    ca-certificates \
    curl \
    sqlite3 \
    dumb-init \
    libmariadb-dev-compat \
    libpq5 \
    && rm -rf /var/lib/apt/lists/*
@@ -136,9 +143,10 @@ EXPOSE 3012

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
WORKDIR /
COPY Rocket.toml .
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/armv7-unknown-linux-gnueabihf/release/bitwarden_rs .
COPY --from=build /app/target/armv7-unknown-linux-gnueabihf/release/vaultwarden .

COPY docker/healthcheck.sh /healthcheck.sh
COPY docker/start.sh /start.sh
@@ -146,6 +154,5 @@ COPY docker/start.sh /start.sh
HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

# Configures the startup!
WORKDIR /
ENTRYPOINT ["/usr/bin/dumb-init", "--"]
CMD ["/start.sh"]
@@ -1,21 +1,28 @@
# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfile's.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.

# Using multistage build:
#   https://docs.docker.com/develop/develop-images/multistage-build/
#   https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE  #######################

# This hash is extracted from the docker web-vault builds and it's preferred over a simple tag because it's immutable.
# It can be viewed in multiple ways:
# - From the https://hub.docker.com/repository/docker/bitwardenrs/web-vault/tags page, click the tag name and the digest should be there.
# - From the console, with the following commands:
#     docker pull bitwardenrs/web-vault:v2.17.1
#     docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.17.1
# The web-vault digest specifies a particular web-vault build on Docker Hub.
# Using the digest instead of the tag name provides better security,
# as the digest of an image is immutable, whereas a tag name can later
# be changed to point to a malicious image.
#
# - To do the opposite, and get the tag from the hash, you can do:
#     docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:dcb7884dc5845b3842ff2204fe77482000b771495c6c359297ec3c03330d65e0
FROM bitwardenrs/web-vault@sha256:dcb7884dc5845b3842ff2204fe77482000b771495c6c359297ec3c03330d65e0 as vault
# To verify the current digest for a given tag name:
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
#   click the tag name to view the digest of the image it currently points to.
# - From the command line:
#     $ docker pull vaultwarden/web-vault:v2.19.0d
#     $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2.19.0d
#     [vaultwarden/web-vault@sha256:a7bd6bc4db33bd45f723c4b1ac90918b7f80204560683cfc8efd9efd03a9b233]
#
# - Conversely, to get the tag name from the digest:
#     $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:a7bd6bc4db33bd45f723c4b1ac90918b7f80204560683cfc8efd9efd03a9b233
#     [vaultwarden/web-vault:v2.19.0d]
#
FROM vaultwarden/web-vault@sha256:a7bd6bc4db33bd45f723c4b1ac90918b7f80204560683cfc8efd9efd03a9b233 as vault

########################## BUILD IMAGE  ##########################
FROM messense/rust-musl-cross:armv7-musleabihf as build

@@ -31,6 +38,7 @@ RUN rustup set profile minimal

ENV USER "root"
ENV RUSTFLAGS='-C link-arg=-s'
ENV CFLAGS_armv7_unknown_linux_musleabihf="-mfpu=vfpv3-d16"

# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app

@@ -59,12 +67,12 @@ RUN touch src/main.rs
# Builds again, this time it'll just be
# your actual source files being built
RUN cargo build --features ${DB} --release --target=armv7-unknown-linux-musleabihf
RUN musl-strip target/armv7-unknown-linux-musleabihf/release/bitwarden_rs
RUN musl-strip target/armv7-unknown-linux-musleabihf/release/vaultwarden

######################## RUNTIME IMAGE  ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM balenalib/armv7hf-alpine:3.12
FROM balenalib/armv7hf-alpine:3.13

ENV ROCKET_ENV "staging"
ENV ROCKET_PORT=80

@@ -77,9 +85,8 @@ RUN [ "cross-build-start" ]
RUN apk add --no-cache \
        openssl \
        curl \
        sqlite \
        dumb-init \
        ca-certificates
RUN apk add --no-cache -X http://dl-cdn.alpinelinux.org/alpine/edge/community catatonit

RUN mkdir /data

@@ -91,9 +98,10 @@ EXPOSE 3012

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
WORKDIR /
COPY Rocket.toml .
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/armv7-unknown-linux-musleabihf/release/bitwarden_rs .
COPY --from=build /app/target/armv7-unknown-linux-musleabihf/release/vaultwarden .

COPY docker/healthcheck.sh /healthcheck.sh
COPY docker/start.sh /start.sh

@@ -101,6 +109,5 @@ COPY docker/start.sh /start.sh
HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

# Configures the startup!
WORKDIR /
CMD ["catatonit", "/start.sh"]

ENTRYPOINT ["/usr/bin/dumb-init", "--"]
CMD ["/start.sh"]
@@ -1,10 +1,20 @@
#!/bin/sh

if [ -r /etc/bitwarden_rs.sh ]; then
if [ -r /etc/vaultwarden.sh ]; then
    . /etc/vaultwarden.sh
elif [ -r /etc/bitwarden_rs.sh ]; then
    echo "### You are using the old /etc/bitwarden_rs.sh script, please migrate to /etc/vaultwarden.sh ###"
    . /etc/bitwarden_rs.sh
fi

if [ -d /etc/bitwarden_rs.d ]; then
if [ -d /etc/vaultwarden.d ]; then
    for f in /etc/vaultwarden.d/*.sh; do
        if [ -r $f ]; then
            . $f
        fi
    done
elif [ -d /etc/bitwarden_rs.d ]; then
    echo "### You are using the old /etc/bitwarden_rs.d script directory, please migrate to /etc/vaultwarden.d ###"
    for f in /etc/bitwarden_rs.d/*.sh; do
        if [ -r $f ]; then
            . $f
@@ -12,4 +22,4 @@ if [ -d /etc/bitwarden_rs.d ]; then
    done
fi

exec /bitwarden_rs "${@}"
exec /vaultwarden "${@}"
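As a usage sketch of the new drop-in directory (file name and contents illustrative), any readable *.sh file under /etc/vaultwarden.d is sourced before the server starts:

    # /etc/vaultwarden.d/10-smtp.sh
    export SMTP_HOST=mail.example.com
    export SMTP_FROM=vaultwarden@example.com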
@@ -10,7 +10,7 @@ Docker Hub hooks provide these predefined [environment variables](https://docs.d
* `DOCKER_TAG`: the Docker repository tag being built.
* `IMAGE_NAME`: the name and tag of the Docker repository being built. (This variable is a combination of `DOCKER_REPO:DOCKER_TAG`.)

The current multi-arch image build relies on the original bitwarden_rs Dockerfiles, which use cross-compilation for architectures other than `amd64`, and don't yet support all arch/database/OS combinations. However, cross-compilation is much faster than QEMU-based builds (e.g., using `docker buildx`). This situation may need to be revisited at some point.
The current multi-arch image build relies on the original vaultwarden Dockerfiles, which use cross-compilation for architectures other than `amd64`, and don't yet support all arch/distro combinations. However, cross-compilation is much faster than QEMU-based builds (e.g., using `docker buildx`). This situation may need to be revisited at some point.

## References
@@ -1,19 +1,16 @@
# The default Debian-based images support these arches for all database connections
#
# Other images (Alpine-based) currently
# support only a subset of these.
# The default Debian-based images support these arches for all database backends.
arches=(
    amd64
    arm32v6
    arm32v7
    arm64v8
    armv6
    armv7
    arm64
)

if [[ "${DOCKER_TAG}" == *alpine ]]; then
    # The Alpine build currently only works for amd64.
    os_suffix=.alpine
    # The Alpine image build currently only works for certain arches.
    distro_suffix=.alpine
    arches=(
        amd64
        arm32v7
        armv7
    )
fi
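To see which arches and Dockerfile suffix a given tag selects, the script can be sourced directly (tag value illustrative):

    export DOCKER_TAG=1.21.0-alpine
    source ./hooks/arches.sh
    echo "${arches[@]}" "${distro_suffix}"   # -> amd64 armv7 .alpine, per the new list above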
hooks/build
@@ -4,11 +4,42 @@ echo ">>> Building images..."

source ./hooks/arches.sh

if [[ -z "${SOURCE_COMMIT}" ]]; then
    # This var is typically predefined by Docker Hub, but it won't be
    # when testing locally.
    SOURCE_COMMIT="$(git rev-parse HEAD)"
fi

# Construct a version string in the style of `build.rs`.
GIT_EXACT_TAG="$(git describe --tags --abbrev=0 --exact-match 2>/dev/null)"
if [[ -n "${GIT_EXACT_TAG}" ]]; then
    SOURCE_VERSION="${GIT_EXACT_TAG}"
else
    GIT_LAST_TAG="$(git describe --tags --abbrev=0)"
    SOURCE_VERSION="${GIT_LAST_TAG}-${SOURCE_COMMIT:0:8}"
fi

LABELS=(
    # https://github.com/opencontainers/image-spec/blob/master/annotations.md
    org.opencontainers.image.created="$(date --utc --iso-8601=seconds)"
    org.opencontainers.image.documentation="https://github.com/dani-garcia/vaultwarden/wiki"
    org.opencontainers.image.licenses="GPL-3.0-only"
    org.opencontainers.image.revision="${SOURCE_COMMIT}"
    org.opencontainers.image.source="${SOURCE_REPOSITORY_URL}"
    org.opencontainers.image.url="https://hub.docker.com/r/${DOCKER_REPO#*/}"
    org.opencontainers.image.version="${SOURCE_VERSION}"
)
LABEL_ARGS=()
for label in "${LABELS[@]}"; do
    LABEL_ARGS+=(--label "${label}")
done

set -ex

for arch in "${arches[@]}"; do
    docker build \
        "${LABEL_ARGS[@]}" \
        -t "${DOCKER_REPO}:${DOCKER_TAG}-${arch}" \
        -f docker/${arch}/Dockerfile${os_suffix} \
        -f docker/${arch}/Dockerfile${distro_suffix} \
        .
done
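Expanded, a single iteration of the build loop amounts to something like the following (repo, tag, version, and arch values are illustrative):

    docker build \
        --label org.opencontainers.image.revision="0123456789abcdef0123456789abcdef01234567" \
        --label org.opencontainers.image.version="1.21.0-01234567" \
        -t vaultwarden/server:testing-amd64 \
        -f docker/amd64/Dockerfile \
        .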
hooks/pre_build (new executable file)
@@ -0,0 +1,28 @@
#!/bin/bash

set -ex

# If requested, print some environment info for troubleshooting.
if [[ -n "${DOCKER_HUB_DEBUG}" ]]; then
    id
    pwd
    df -h
    env
    docker info
    docker version
fi

# Install build dependencies.
deps=(
    jq
)
apt-get update
apt-get install -y "${deps[@]}"

# Docker Hub uses a shallow clone and doesn't fetch tags, which breaks some
# Git operations that we perform later, so fetch the complete history and
# tags first. Note that if the build is cached, the clone may have been
# unshallowed already; if so, unshallowing will fail, so skip it.
if [[ -f .git/shallow ]]; then
    git fetch --unshallow --tags
fi
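The guard on .git/shallow matters because git refuses to unshallow a clone that is already complete; roughly (exact message may vary by git version):

    git fetch --unshallow
    # fatal: --unshallow on a complete repository does not make sense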
hooks/push
@@ -1,117 +1,138 @@
#!/bin/bash

echo ">>> Pushing images..."

export DOCKER_CLI_EXPERIMENTAL=enabled

declare -A annotations=(
    [amd64]="--os linux --arch amd64"
    [arm32v6]="--os linux --arch arm --variant v6"
    [arm32v7]="--os linux --arch arm --variant v7"
    [arm64v8]="--os linux --arch arm64 --variant v8"
)

source ./hooks/arches.sh

export DOCKER_CLI_EXPERIMENTAL=enabled

# Join a list of args with a single char.
# Ref: https://stackoverflow.com/a/17841619
join() { local IFS="$1"; shift; echo "$*"; }
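A quick usage note for join (a sketch):

    join "," "linux/amd64" "linux/arm/v7"
    # -> linux/amd64,linux/arm/v7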
set -ex

declare -A images
echo ">>> Starting local Docker registry..."

# Docker Buildx's `docker-container` driver is needed for multi-platform
# builds, but it can't access existing images on the Docker host (like the
# cross-compiled ones we just built). Those images first need to be pushed to
# a registry -- Docker Hub could be used, but since it's not trivial to clean
# up those intermediate images on Docker Hub, it's easier to just run a local
# Docker registry, which gets cleaned up automatically once the build job ends.
#
# https://docs.docker.com/registry/deploying/
# https://hub.docker.com/_/registry
#
# Use host networking so the buildx container can access the registry via
# localhost.
#
docker run -d --name registry --network host registry:2  # defaults to port 5000

# Docker Hub sets a `DOCKER_REPO` env var with the format `index.docker.io/user/repo`.
# Strip the registry portion to construct a local repo path for use in `Dockerfile.buildx`.
LOCAL_REGISTRY="localhost:5000"
REPO="${DOCKER_REPO#*/}"
LOCAL_REPO="${LOCAL_REGISTRY}/${REPO}"
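For example, with DOCKER_REPO=index.docker.io/vaultwarden/server (value illustrative), the parameter expansion above yields:

    REPO="vaultwarden/server"
    LOCAL_REPO="localhost:5000/vaultwarden/server"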
>> Pushing images to local registry...">
echo ">>> Pushing images to local registry..."

for arch in ${arches[@]}; do
    images[$arch]="${DOCKER_REPO}:${DOCKER_TAG}-${arch}"
    docker_image="${DOCKER_REPO}:${DOCKER_TAG}-${arch}"
    local_image="${LOCAL_REPO}:${DOCKER_TAG}-${arch}"
    docker tag "${docker_image}" "${local_image}"
    docker push "${local_image}"
done

# Push the images that were just built; manifest list creation fails if the
# images (manifests) referenced don't already exist in the Docker registry.
for image in "${images[@]}"; do
    docker push "${image}"
done
echo ">>> Setting up Docker Buildx..."

manifest_lists=("${DOCKER_REPO}:${DOCKER_TAG}")
# Same as earlier, use host networking so the buildx container can access the
# registry via localhost.
#
# Ref: https://github.com/docker/buildx/issues/94#issuecomment-534367714
#
docker buildx create --name builder --use --driver-opt network=host

# If the Docker tag starts with a version number, assume the latest release is
# being pushed. Add an extra manifest (`latest` or `alpine`, as appropriate)
echo ">>> Running Docker Buildx..."

tags=("${DOCKER_REPO}:${DOCKER_TAG}")

# If the Docker tag starts with a version number, assume the latest release
# is being pushed. Add an extra tag (`latest` or `alpine`, as appropriate)
# to make it easier for users to track the latest release.
if [[ "${DOCKER_TAG}" =~ ^[0-9]+\.[0-9]+\.[0-9]+ ]]; then
    if [[ "${DOCKER_TAG}" == *alpine ]]; then
        manifest_lists+=(${DOCKER_REPO}:alpine)
        tags+=(${DOCKER_REPO}:alpine)
    else
        manifest_lists+=(${DOCKER_REPO}:latest)

        # Add an extra `latest-arm32v6` tag; Docker can't seem to properly
        # auto-select that image on Armv6 platforms like Raspberry Pi 1 and Zero
        # (https://github.com/moby/moby/issues/41017).
        #
        # Add this tag only for the SQLite image, as the MySQL and PostgreSQL
        # builds don't currently work on non-amd64 arches.
        #
        # TODO: Also add an `alpine-arm32v6` tag if multi-arch support for
        # Alpine-based bitwarden_rs images is implemented before this Docker
        # issue is fixed.
        if [[ ${DOCKER_REPO} == *server ]]; then
            docker tag "${DOCKER_REPO}:${DOCKER_TAG}-arm32v6" "${DOCKER_REPO}:latest-arm32v6"
            docker push "${DOCKER_REPO}:latest-arm32v6"
        fi
        tags+=(${DOCKER_REPO}:latest)
    fi
fi

for manifest_list in "${manifest_lists[@]}"; do
    # Create the (multi-arch) manifest list of arch-specific images.
    docker manifest create ${manifest_list} ${images[@]}

    # Make sure each image manifest is annotated with the correct arch info.
    # Docker does not auto-detect the arch of each cross-compiled image, so
    # everything would appear as `linux/amd64` otherwise.
    for arch in "${arches[@]}"; do
        docker manifest annotate ${annotations[$arch]} ${manifest_list} ${images[$arch]}
    done

    # Push the manifest list.
    docker manifest push --purge ${manifest_list}
tag_args=()
for tag in "${tags[@]}"; do
    tag_args+=(--tag "${tag}")
done

# Avoid logging credentials and tokens.
set +ex

# Delete the arch-specific tags, if credentials for doing so are available.
# Note that `DOCKER_PASSWORD` must be the actual user password. Passing a JWT
# obtained using a personal access token results in a 403 error with
# {"detail": "access to the resource is forbidden with personal access token"}
if [[ -z "${DOCKER_USERNAME}" || -z "${DOCKER_PASSWORD}" ]]; then
    exit 0
fi

# Given a JSON input on stdin, extract the string value associated with the
# specified key. This avoids an extra dependency on a tool like `jq`.
extract() {
    local key="$1"
    # Extract "<key>":"<val>" (assumes key/val won't contain double quotes).
    # The colon may have whitespace on either side.
    grep -o "\"${key}\"[[:space:]]*:[[:space:]]*\"[^\"]\+\"" |
    # Extract just <val> by deleting the last '"', and then greedily deleting
    # everything up to '"'.
    sed -e 's/"$//' -e 's/.*"//'
}
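A usage sketch of extract:

    echo '{"token": "abc123"}' | extract 'token'
    # -> abc123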
>> Getting API token...">
echo ">>> Getting API token..."
jwt=$(curl -sS -X POST \
    -H "Content-Type: application/json" \
    -d "{\"username\":\"${DOCKER_USERNAME}\",\"password\": \"${DOCKER_PASSWORD}\"}" \
    "https://hub.docker.com/v2/users/login" |
    extract 'token')

# Strip the registry portion from `index.docker.io/user/repo`.
repo="${DOCKER_REPO#*/}"
# Docker Buildx takes a list of target platforms (OS/arch/variant), so map
# the arch list to a platform list (assuming the OS is always `linux`).
declare -A arch_to_platform=(
    [amd64]="linux/amd64"
    [armv6]="linux/arm/v6"
    [armv7]="linux/arm/v7"
    [arm64]="linux/arm64"
)
platforms=()
for arch in ${arches[@]}; do
    # Don't delete the `arm32v6` tag; Docker can't seem to properly
    # auto-select that image on Armv6 platforms like Raspberry Pi 1 and Zero
    # (https://github.com/moby/moby/issues/41017).
    if [[ ${arch} == 'arm32v6' ]]; then
        continue
    fi
    tag="${DOCKER_TAG}-${arch}"
    echo ">>> Deleting '${repo}:${tag}'..."
    curl -sS -X DELETE \
        -H "Authorization: Bearer ${jwt}" \
        "https://hub.docker.com/v2/repositories/${repo}/tags/${tag}/"
    platforms+=("${arch_to_platform[$arch]}")
done
platforms="$(join "," "${platforms[@]}")"
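With the new arch list (amd64 armv6 armv7 arm64), the mapping above produces:

    platforms="linux/amd64,linux/arm/v6,linux/arm/v7,linux/arm64"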
# Run the build, pushing the resulting images and multi-arch manifest list to
# Docker Hub. The Dockerfile is read from stdin to avoid sending any build
# context, which isn't needed here since the actual cross-compiled images
# have already been built.
docker buildx build \
    --network host \
    --build-arg LOCAL_REPO="${LOCAL_REPO}" \
    --build-arg DOCKER_TAG="${DOCKER_TAG}" \
    --platform "${platforms}" \
    "${tag_args[@]}" \
    --push \
    - < ./docker/Dockerfile.buildx

# Add an extra arch-specific tag for `arm32v6`; Docker can't seem to properly
# auto-select that image on ARMv6 platforms like Raspberry Pi 1 and Zero
# (https://github.com/moby/moby/issues/41017).
#
# Note that we use `arm32v6` instead of `armv6` to be consistent with the
# existing vaultwarden tags, which adhere to the naming conventions of the
# Docker per-architecture repos (e.g., https://hub.docker.com/u/arm32v6).
# Unfortunately, these per-arch repo names aren't always consistent with the
# corresponding platform (OS/arch/variant) IDs, particularly in the case of
# 32-bit ARM arches (e.g., `linux/arm/v6` is used, not `linux/arm32/v6`).
#
# TODO: It looks like this issue should be fixed starting in Docker 20.10.0,
# so this step can be removed once fixed versions are in wider distribution.
#
# Tags:
#
#   testing => testing-arm32v6
#   testing-alpine => <ignored>
#   x.y.z => x.y.z-arm32v6, latest-arm32v6
#   x.y.z-alpine => <ignored>
#
if [[ "${DOCKER_TAG}" != *alpine ]]; then
    image="${DOCKER_REPO}":"${DOCKER_TAG}"

    # Fetch the multi-arch manifest list and find the digest of the armv6 image.
    filter='.manifests|.[]|select(.platform.architecture=="arm" and .platform.variant=="v6")|.digest'
    digest="$(docker manifest inspect "${image}" | jq -r "${filter}")"

    # Pull the armv6 image by digest, retag it, and repush it.
    docker pull "${DOCKER_REPO}"@"${digest}"
    docker tag "${DOCKER_REPO}"@"${digest}" "${image}"-arm32v6
    docker push "${image}"-arm32v6

    if [[ "${DOCKER_TAG}" =~ ^[0-9]+\.[0-9]+\.[0-9]+ ]]; then
        docker tag "${image}"-arm32v6 "${DOCKER_REPO}:latest"-arm32v6
        docker push "${DOCKER_REPO}:latest"-arm32v6
    fi
fi
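To spot-check the result, the pushed manifest list can be inspected per platform (tag illustrative; output shape abbreviated):

    docker manifest inspect vaultwarden/server:1.21.0 | jq '.manifests[].platform'
    # -> {"architecture": "amd64", "os": "linux"}
    #    {"architecture": "arm", "os": "linux", "variant": "v6"}
    #    ...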
migrations/mysql/2021-03-11-190243_add_sends/down.sql (new file)
@@ -0,0 +1 @@
DROP TABLE sends;

migrations/mysql/2021-03-11-190243_add_sends/up.sql (new file)
@@ -0,0 +1,25 @@
CREATE TABLE sends (
    uuid              CHAR(36) NOT NULL PRIMARY KEY,
    user_uuid         CHAR(36) REFERENCES users (uuid),
    organization_uuid CHAR(36) REFERENCES organizations (uuid),

    name  TEXT NOT NULL,
    notes TEXT,

    atype         INTEGER NOT NULL,
    data          TEXT    NOT NULL,
    akey          TEXT    NOT NULL,
    password_hash BLOB,
    password_salt BLOB,
    password_iter INTEGER,

    max_access_count INTEGER,
    access_count     INTEGER NOT NULL,

    creation_date   DATETIME NOT NULL,
    revision_date   DATETIME NOT NULL,
    expiration_date DATETIME,
    deletion_date   DATETIME NOT NULL,

    disabled BOOLEAN NOT NULL
);
migrations/postgresql/2021-03-11-190243_add_sends/down.sql (new file)
@@ -0,0 +1 @@
DROP TABLE sends;
migrations/postgresql/2021-03-11-190243_add_sends/up.sql (new file)
@@ -0,0 +1,25 @@
CREATE TABLE sends (
    uuid              CHAR(36) NOT NULL PRIMARY KEY,
    user_uuid         CHAR(36) REFERENCES users (uuid),
    organization_uuid CHAR(36) REFERENCES organizations (uuid),

    name  TEXT NOT NULL,
    notes TEXT,

    atype         INTEGER NOT NULL,
    data          TEXT    NOT NULL,
    key           TEXT    NOT NULL,
    password_hash BYTEA,
    password_salt BYTEA,
    password_iter INTEGER,

    max_access_count INTEGER,
    access_count     INTEGER NOT NULL,

    creation_date   TIMESTAMP NOT NULL,
    revision_date   TIMESTAMP NOT NULL,
    expiration_date TIMESTAMP,
    deletion_date   TIMESTAMP NOT NULL,

    disabled BOOLEAN NOT NULL
);

@@ -0,0 +1 @@
ALTER TABLE sends RENAME COLUMN key TO akey;
migrations/sqlite/2021-03-11-190243_add_sends/down.sql (new file)
@@ -0,0 +1 @@
DROP TABLE sends;

migrations/sqlite/2021-03-11-190243_add_sends/up.sql (new file)
@@ -0,0 +1,25 @@
CREATE TABLE sends (
    uuid              TEXT NOT NULL PRIMARY KEY,
    user_uuid         TEXT REFERENCES users (uuid),
    organization_uuid TEXT REFERENCES organizations (uuid),

    name  TEXT NOT NULL,
    notes TEXT,

    atype         INTEGER NOT NULL,
    data          TEXT    NOT NULL,
    key           TEXT    NOT NULL,
    password_hash BLOB,
    password_salt BLOB,
    password_iter INTEGER,

    max_access_count INTEGER,
    access_count     INTEGER NOT NULL,

    creation_date   DATETIME NOT NULL,
    revision_date   DATETIME NOT NULL,
    expiration_date DATETIME,
    deletion_date   DATETIME NOT NULL,

    disabled BOOLEAN NOT NULL
);

@@ -0,0 +1 @@
ALTER TABLE sends RENAME COLUMN key TO akey;
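A quick way to confirm the rename took effect on an SQLite database (database path illustrative):

    sqlite3 data/db.sqlite3 "PRAGMA table_info(sends);" | grep akey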
@@ -1 +1 @@
nightly-2020-11-22
nightly-2021-04-14
@@ -1,2 +1,7 @@
version = "Two"
edition = "2018"
max_width = 120
newline_style = "Unix"
use_small_heuristics = "Off"
struct_lit_single_line = false
overflow_delimited_expr = true
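With the nightly toolchain pinned above, applying these rustfmt settings is a matter of (sketch):

    rustup toolchain install nightly-2021-04-14
    cargo +nightly-2021-04-14 fmt -- --check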
src/api/admin.rs
@@ -1,7 +1,7 @@
use once_cell::sync::Lazy;
use serde::de::DeserializeOwned;
use serde_json::Value;
use std::process::Command;
use std::{env, time::Duration};

use rocket::{
    http::{Cookie, Cookies, SameSite},
@@ -12,13 +12,13 @@ use rocket::{
use rocket_contrib::json::Json;

use crate::{
    api::{ApiResult, EmptyResult, JsonResult},
    api::{ApiResult, EmptyResult, NumberOrString},
    auth::{decode_admin, encode_jwt, generate_admin_claims, ClientIp},
    config::ConfigBuilder,
    db::{backup_database, models::*, DbConn, DbConnType},
    db::{backup_database, get_sql_server_version, models::*, DbConn, DbConnType},
    error::{Error, MapResult},
    mail,
    util::{get_display_size, format_naive_datetime_local},
    util::{format_naive_datetime_local, get_display_size, get_reqwest_client, is_running_in_docker},
    CONFIG,
};

@@ -39,6 +39,7 @@ pub fn routes() -> Vec<Route> {
        disable_user,
        enable_user,
        remove_2fa,
        update_user_org_type,
        update_revision_users,
        post_config,
        delete_config,
@@ -46,17 +47,25 @@ pub fn routes() -> Vec<Route> {
        test_smtp,
        users_overview,
        organizations_overview,
        delete_organization,
        diagnostics,
        get_diagnostics_config
    ]
}

static CAN_BACKUP: Lazy<bool> = Lazy::new(|| {
static DB_TYPE: Lazy<&str> = Lazy::new(|| {
    DbConnType::from_url(&CONFIG.database_url())
        .map(|t| t == DbConnType::sqlite)
        .unwrap_or(false)
        && Command::new("sqlite3").arg("-version").status().is_ok()
        .map(|t| match t {
            DbConnType::sqlite => "SQLite",
            DbConnType::mysql => "MySQL",
            DbConnType::postgresql => "PostgreSQL",
        })
        .unwrap_or("Unknown")
});

static CAN_BACKUP: Lazy<bool> =
    Lazy::new(|| DbConnType::from_url(&CONFIG.database_url()).map(|t| t == DbConnType::sqlite).unwrap_or(false));
#[get("/")]
|
||||
fn admin_disabled() -> &'static str {
|
||||
"The admin panel is disabled, please configure the 'ADMIN_TOKEN' variable to enable it"
|
||||
@@ -82,6 +91,27 @@ impl<'a, 'r> FromRequest<'a, 'r> for Referer {
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
struct IpHeader(Option<String>);
|
||||
|
||||
impl<'a, 'r> FromRequest<'a, 'r> for IpHeader {
|
||||
type Error = ();
|
||||
|
||||
fn from_request(req: &'a Request<'r>) -> Outcome<Self, Self::Error> {
|
||||
if req.headers().get_one(&CONFIG.ip_header()).is_some() {
|
||||
Outcome::Success(IpHeader(Some(CONFIG.ip_header())))
|
||||
} else if req.headers().get_one("X-Client-IP").is_some() {
|
||||
Outcome::Success(IpHeader(Some(String::from("X-Client-IP"))))
|
||||
} else if req.headers().get_one("X-Real-IP").is_some() {
|
||||
Outcome::Success(IpHeader(Some(String::from("X-Real-IP"))))
|
||||
} else if req.headers().get_one("X-Forwarded-For").is_some() {
|
||||
Outcome::Success(IpHeader(Some(String::from("X-Forwarded-For"))))
|
||||
} else {
|
||||
Outcome::Success(IpHeader(None))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
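The header probe above can be exercised against a running instance with curl (host, header value, and cookie name are illustrative; the endpoint requires an authenticated admin session):

    curl -s -H "X-Real-IP: 203.0.113.7" \
        --cookie "BWRS_ADMIN=<session-jwt>" \
        https://vault.example.com/admin/diagnostics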
/// Used for `Location` response headers, which must specify an absolute URI
/// (see https://tools.ietf.org/html/rfc2616#section-14.30).
fn admin_url(referer: Referer) -> String {
@@ -107,7 +137,12 @@ fn admin_url(referer: Referer) -> String {
fn admin_login(flash: Option<FlashMessage>) -> ApiResult<Html<String>> {
    // If there is an error, show it
    let msg = flash.map(|msg| format!("{}: {}", msg.name(), msg.msg()));
    let json = json!({"page_content": "admin/login", "version": VERSION, "error": msg, "urlpath": CONFIG.domain_path()});
    let json = json!({
        "page_content": "admin/login",
        "version": VERSION,
        "error": msg,
        "urlpath": CONFIG.domain_path()
    });

    // Return the page
    let text = CONFIG.render_template(BASE_TEMPLATE, &json)?;
@@ -131,10 +166,7 @@ fn post_admin_login(
    // If the token is invalid, redirect to login page
    if !_validate_token(&data.token) {
        error!("Invalid admin token. IP: {}", ip.ip);
        Err(Flash::error(
            Redirect::to(admin_url(referer)),
            "Invalid admin token, please try again.",
        ))
        Err(Flash::error(Redirect::to(admin_url(referer)), "Invalid admin token, please try again."))
    } else {
        // If the token received is valid, generate JWT and save it as a cookie
        let claims = generate_admin_claims();
@@ -277,24 +309,25 @@ fn test_smtp(data: Json<InviteData>, _token: AdminToken) -> EmptyResult {
}

#[get("/logout")]
fn logout(mut cookies: Cookies, referer: Referer) -> Result<Redirect, ()> {
fn logout(mut cookies: Cookies, referer: Referer) -> Redirect {
    cookies.remove(Cookie::named(COOKIE_NAME));
    Ok(Redirect::to(admin_url(referer)))
    Redirect::to(admin_url(referer))
}

#[get("/users")]
fn get_users_json(_token: AdminToken, conn: DbConn) -> JsonResult {
fn get_users_json(_token: AdminToken, conn: DbConn) -> Json<Value> {
    let users = User::get_all(&conn);
    let users_json: Vec<Value> = users.iter().map(|u| u.to_json(&conn)).collect();

    Ok(Json(Value::Array(users_json)))
    Json(Value::Array(users_json))
}

#[get("/users/overview")]
fn users_overview(_token: AdminToken, conn: DbConn) -> ApiResult<Html<String>> {
    let users = User::get_all(&conn);
    let dt_fmt = "%Y-%m-%d %H:%M:%S %Z";
    let users_json: Vec<Value> = users.iter()
    let users_json: Vec<Value> = users
        .iter()
        .map(|u| {
            let mut usr = u.to_json(&conn);
            usr["cipher_count"] = json!(Cipher::count_owned_by_user(&u.uuid, &conn));
@@ -304,10 +337,11 @@ fn users_overview(_token: AdminToken, conn: DbConn) -> ApiResult<Html<String>> {
            usr["created_at"] = json!(format_naive_datetime_local(&u.created_at, dt_fmt));
            usr["last_active"] = match u.last_active(&conn) {
                Some(dt) => json!(format_naive_datetime_local(&dt, dt_fmt)),
                None => json!("Never")
                None => json!("Never"),
            };
            usr
        }).collect();
        })
        .collect();

    let text = AdminTemplateData::users(users_json).render()?;
    Ok(Html(text))
@@ -354,6 +388,40 @@ fn remove_2fa(uuid: String, _token: AdminToken, conn: DbConn) -> EmptyResult {
    user.save(&conn)
}

#[derive(Deserialize, Debug)]
struct UserOrgTypeData {
    user_type: NumberOrString,
    user_uuid: String,
    org_uuid: String,
}

#[post("/users/org_type", data = "<data>")]
fn update_user_org_type(data: Json<UserOrgTypeData>, _token: AdminToken, conn: DbConn) -> EmptyResult {
    let data: UserOrgTypeData = data.into_inner();

    let mut user_to_edit = match UserOrganization::find_by_user_and_org(&data.user_uuid, &data.org_uuid, &conn) {
        Some(user) => user,
        None => err!("The specified user isn't a member of the organization"),
    };

    let new_type = match UserOrgType::from_str(&data.user_type.into_string()) {
        Some(new_type) => new_type as i32,
        None => err!("Invalid type"),
    };

    if user_to_edit.atype == UserOrgType::Owner && new_type != UserOrgType::Owner {
        // Removing owner permission; check that at least one other owner remains.
        let num_owners = UserOrganization::find_by_org_and_type(&data.org_uuid, UserOrgType::Owner as i32, &conn).len();

        if num_owners <= 1 {
            err!("Can't change the type of the last owner")
        }
    }

    user_to_edit.atype = new_type as i32;
    user_to_edit.save(&conn)
}
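The new endpoint can be driven with curl (host, uuids, and cookie name illustrative; user_type 2 corresponds to a regular user in the UserOrgType numbering, stated here as an assumption):

    curl -s -X POST https://vault.example.com/admin/users/org_type \
        -H "Content-Type: application/json" \
        --cookie "BWRS_ADMIN=<session-jwt>" \
        -d '{"user_type": 2, "user_uuid": "<user-uuid>", "org_uuid": "<org-uuid>"}'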
#[post("/users/update_revision")]
|
||||
fn update_revision_users(_token: AdminToken, conn: DbConn) -> EmptyResult {
|
||||
User::update_all_revisions(&conn)
|
||||
@@ -362,19 +430,28 @@ fn update_revision_users(_token: AdminToken, conn: DbConn) -> EmptyResult {
|
||||
#[get("/organizations/overview")]
|
||||
fn organizations_overview(_token: AdminToken, conn: DbConn) -> ApiResult<Html<String>> {
|
||||
let organizations = Organization::get_all(&conn);
|
||||
let organizations_json: Vec<Value> = organizations.iter().map(|o| {
|
||||
let organizations_json: Vec<Value> = organizations
|
||||
.iter()
|
||||
.map(|o| {
|
||||
let mut org = o.to_json();
|
||||
org["user_count"] = json!(UserOrganization::count_by_org(&o.uuid, &conn));
|
||||
org["cipher_count"] = json!(Cipher::count_by_org(&o.uuid, &conn));
|
||||
org["attachment_count"] = json!(Attachment::count_by_org(&o.uuid, &conn));
|
||||
org["attachment_size"] = json!(get_display_size(Attachment::size_by_org(&o.uuid, &conn) as i32));
|
||||
org
|
||||
}).collect();
|
||||
})
|
||||
.collect();
|
||||
|
||||
let text = AdminTemplateData::organizations(organizations_json).render()?;
|
||||
Ok(Html(text))
|
||||
}
|
||||
|
||||
#[post("/organizations/<uuid>/delete")]
|
||||
fn delete_organization(uuid: String, _token: AdminToken, conn: DbConn) -> EmptyResult {
|
||||
let org = Organization::find_by_uuid(&uuid, &conn).map_res("Organization doesn't exist")?;
|
||||
org.delete(&conn)
|
||||
}
|
||||
|
||||
#[derive(Deserialize)]
|
||||
struct WebVaultVersion {
|
||||
version: String,
|
||||
@@ -391,77 +468,120 @@ struct GitCommit {
|
||||
}
|
||||
|
||||
fn get_github_api<T: DeserializeOwned>(url: &str) -> Result<T, Error> {
|
||||
use reqwest::{blocking::Client, header::USER_AGENT};
|
||||
use std::time::Duration;
|
||||
let github_api = Client::builder().build()?;
|
||||
let github_api = get_reqwest_client();
|
||||
|
||||
Ok(
|
||||
github_api.get(url)
|
||||
.timeout(Duration::from_secs(10))
|
||||
.header(USER_AGENT, "Bitwarden_RS")
|
||||
.send()?
|
||||
.error_for_status()?
|
||||
.json::<T>()?
|
||||
)
|
||||
Ok(github_api.get(url).timeout(Duration::from_secs(10)).send()?.error_for_status()?.json::<T>()?)
|
||||
}
|
||||
|
||||
fn has_http_access() -> bool {
|
||||
let http_access = get_reqwest_client();
|
||||
|
||||
match http_access.head("https://github.com/dani-garcia/vaultwarden").timeout(Duration::from_secs(10)).send() {
|
||||
Ok(r) => r.status().is_success(),
|
||||
_ => false,
|
||||
}
|
||||
}
|
||||
|
||||
#[get("/diagnostics")]
|
||||
fn diagnostics(_token: AdminToken, _conn: DbConn) -> ApiResult<Html<String>> {
|
||||
use std::net::ToSocketAddrs;
|
||||
use chrono::prelude::*;
|
||||
fn diagnostics(_token: AdminToken, ip_header: IpHeader, conn: DbConn) -> ApiResult<Html<String>> {
|
||||
use crate::util::read_file_string;
|
||||
use chrono::prelude::*;
|
||||
use std::net::ToSocketAddrs;
|
||||
|
||||
let vault_version_path = format!("{}/{}", CONFIG.web_vault_folder(), "version.json");
|
||||
let vault_version_str = read_file_string(&vault_version_path)?;
|
||||
let web_vault_version: WebVaultVersion = serde_json::from_str(&vault_version_str)?;
|
||||
// Get current running versions
|
||||
let web_vault_version: WebVaultVersion =
|
||||
match read_file_string(&format!("{}/{}", CONFIG.web_vault_folder(), "bwrs-version.json")) {
|
||||
Ok(s) => serde_json::from_str(&s)?,
|
||||
_ => match read_file_string(&format!("{}/{}", CONFIG.web_vault_folder(), "version.json")) {
|
||||
Ok(s) => serde_json::from_str(&s)?,
|
||||
_ => WebVaultVersion {
|
||||
version: String::from("Version file missing"),
|
||||
},
|
||||
},
|
||||
};
|
||||
|
||||
let github_ips = ("github.com", 0).to_socket_addrs().map(|mut i| i.next());
|
||||
let (dns_resolved, dns_ok) = match github_ips {
|
||||
Ok(Some(a)) => (a.ip().to_string(), true),
|
||||
_ => ("Could not resolve domain name.".to_string(), false),
|
||||
// Execute some environment checks
|
||||
let running_within_docker = is_running_in_docker();
|
||||
let has_http_access = has_http_access();
|
||||
let uses_proxy = env::var_os("HTTP_PROXY").is_some()
|
||||
|| env::var_os("http_proxy").is_some()
|
||||
|| env::var_os("HTTPS_PROXY").is_some()
|
||||
|| env::var_os("https_proxy").is_some();
|
||||
|
||||
// Check if we are able to resolve DNS entries
|
||||
let dns_resolved = match ("github.com", 0).to_socket_addrs().map(|mut i| i.next()) {
|
||||
Ok(Some(a)) => a.ip().to_string(),
|
||||
_ => "Could not resolve domain name.".to_string(),
|
||||
};
|
||||
|
||||
// If the DNS Check failed, do not even attempt to check for new versions since we were not able to resolve github.com
|
||||
let (latest_release, latest_commit, latest_web_build) = if dns_ok {
|
||||
// If the HTTP Check failed, do not even attempt to check for new versions since we were not able to connect with github.com anyway.
|
||||
// TODO: Maybe we need to cache this using a LazyStatic or something. Github only allows 60 requests per hour, and we use 3 here already.
|
||||
let (latest_release, latest_commit, latest_web_build) = if has_http_access {
|
||||
(
|
||||
match get_github_api::<GitRelease>("https://api.github.com/repos/dani-garcia/bitwarden_rs/releases/latest") {
|
||||
match get_github_api::<GitRelease>("https://api.github.com/repos/dani-garcia/vaultwarden/releases/latest") {
|
||||
Ok(r) => r.tag_name,
|
||||
_ => "-".to_string()
|
||||
_ => "-".to_string(),
|
||||
},
|
||||
match get_github_api::<GitCommit>("https://api.github.com/repos/dani-garcia/bitwarden_rs/commits/master") {
|
||||
match get_github_api::<GitCommit>("https://api.github.com/repos/dani-garcia/vaultwarden/commits/main") {
|
||||
Ok(mut c) => {
|
||||
c.sha.truncate(8);
|
||||
c.sha
|
||||
}
|
||||
_ => "-".to_string(),
|
||||
},
|
||||
_ => "-".to_string()
|
||||
},
|
||||
match get_github_api::<GitRelease>("https://api.github.com/repos/dani-garcia/bw_web_builds/releases/latest") {
|
||||
Ok(r) => r.tag_name.trim_start_matches('v').to_string(),
|
||||
_ => "-".to_string()
|
||||
// Do not fetch the web-vault version when running within Docker.
|
||||
// The web-vault version is embedded within the container it self, and should not be updated manually
|
||||
if running_within_docker {
|
||||
"-".to_string()
|
||||
} else {
|
||||
match get_github_api::<GitRelease>(
|
||||
"https://api.github.com/repos/dani-garcia/bw_web_builds/releases/latest",
|
||||
) {
|
||||
Ok(r) => r.tag_name.trim_start_matches('v').to_string(),
|
||||
_ => "-".to_string(),
|
||||
}
|
||||
},
|
||||
)
|
||||
} else {
|
||||
("-".to_string(), "-".to_string(), "-".to_string())
|
||||
};
|
||||
|
||||
// Run the date check as the last item right before filling the json.
|
||||
// This should ensure that the time difference between the browser and the server is as minimal as possible.
|
||||
let dt = Utc::now();
|
||||
let server_time = dt.format("%Y-%m-%d %H:%M:%S UTC").to_string();
|
||||
let ip_header_name = match &ip_header.0 {
|
||||
Some(h) => h,
|
||||
_ => "",
|
||||
};
|
||||
|
||||
let diagnostics_json = json!({
|
||||
"dns_resolved": dns_resolved,
|
||||
"server_time": server_time,
|
||||
"web_vault_version": web_vault_version.version,
|
||||
"latest_release": latest_release,
|
||||
"latest_commit": latest_commit,
|
||||
"web_vault_enabled": &CONFIG.web_vault_enabled(),
|
||||
"web_vault_version": web_vault_version.version,
|
||||
"latest_web_build": latest_web_build,
|
||||
"running_within_docker": running_within_docker,
|
||||
"has_http_access": has_http_access,
|
||||
"ip_header_exists": &ip_header.0.is_some(),
|
||||
"ip_header_match": ip_header_name == CONFIG.ip_header(),
|
||||
"ip_header_name": ip_header_name,
|
||||
"ip_header_config": &CONFIG.ip_header(),
|
||||
"uses_proxy": uses_proxy,
|
||||
"db_type": *DB_TYPE,
|
||||
"db_version": get_sql_server_version(&conn),
|
||||
"admin_url": format!("{}/diagnostics", admin_url(Referer(None))),
|
||||
"server_time_local": Local::now().format("%Y-%m-%d %H:%M:%S %Z").to_string(),
|
||||
"server_time": Utc::now().format("%Y-%m-%d %H:%M:%S UTC").to_string(), // Run the date/time check as the last item to minimize the difference
|
||||
});
|
||||
|
||||
let text = AdminTemplateData::diagnostics(diagnostics_json).render()?;
|
||||
Ok(Html(text))
|
||||
}
|
||||
|
||||
#[get("/diagnostics/config")]
|
||||
fn get_diagnostics_config(_token: AdminToken) -> Json<Value> {
|
||||
let support_json = CONFIG.get_support_json();
|
||||
Json(support_json)
|
||||
}
|
||||
|
||||
#[post("/config", data = "<data>")]
|
||||
fn post_config(data: Json<ConfigBuilder>, _token: AdminToken) -> EmptyResult {
|
||||
let data: ConfigBuilder = data.into_inner();
|
||||
@@ -474,11 +594,11 @@ fn delete_config(_token: AdminToken) -> EmptyResult {
|
||||
}
|
||||
|
||||
#[post("/config/backup_db")]
|
||||
fn backup_db(_token: AdminToken) -> EmptyResult {
|
||||
fn backup_db(_token: AdminToken, conn: DbConn) -> EmptyResult {
|
||||
if *CAN_BACKUP {
|
||||
backup_database()
|
||||
backup_database(&conn)
|
||||
} else {
|
||||
err!("Can't back up current DB (either it's not SQLite or the 'sqlite' binary is not present)");
|
||||
err!("Can't back up current DB (Only SQLite supports this feature)");
|
||||
}
|
||||
}
|
||||
|
||||
|
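For reference, the backup this endpoint triggers is conceptually equivalent to the following (paths illustrative); the removal of the sqlite3 binary check above suggests the backup is now performed in-process rather than by shelling out:

    sqlite3 data/db.sqlite3 ".backup 'data/db_backup.sqlite3'"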
@@ -1,5 +1,6 @@
use chrono::Utc;
use rocket_contrib::json::Json;
use serde_json::Value;

use crate::{
    api::{EmptyResult, JsonResult, JsonUpcase, Notify, NumberOrString, PasswordData, UpdateType},
@@ -94,7 +95,7 @@ fn register(data: JsonUpcase<RegisterData>, conn: DbConn) -> EmptyResult {
        }
        None => {
            // Order is important here; the invitation check must come first
            // because the bitwarden_rs admin can invite anyone, regardless
            // because the vaultwarden admin can invite anyone, regardless
            // of other signup restrictions.
            if Invitation::take(&data.Email, &conn) || CONFIG.is_signup_allowed(&data.Email) {
                User::new(data.Email.clone())
@@ -139,10 +140,8 @@ fn register(data: JsonUpcase<RegisterData>, conn: DbConn) -> EmptyResult {
            }

            user.last_verifying_at = Some(user.created_at);
        } else {
            if let Err(e) = mail::send_welcome(&user.email) {
                error!("Error sending welcome email: {:#?}", e);
            }
        } else if let Err(e) = mail::send_welcome(&user.email) {
            error!("Error sending welcome email: {:#?}", e);
        }
    }

@@ -150,8 +149,8 @@ fn register(data: JsonUpcase<RegisterData>, conn: DbConn) -> EmptyResult {
}

#[get("/accounts/profile")]
fn profile(headers: Headers, conn: DbConn) -> JsonResult {
    Ok(Json(headers.user.to_json(&conn)))
fn profile(headers: Headers, conn: DbConn) -> Json<Value> {
    Json(headers.user.to_json(&conn))
}

#[derive(Deserialize, Debug)]
@@ -321,15 +320,7 @@ fn post_rotatekey(data: JsonUpcase<KeyData>, headers: Headers, conn: DbConn, nt:
            err!("The cipher is not owned by the user")
        }

        update_cipher_from_data(
            &mut saved_cipher,
            cipher_data,
            &headers,
            false,
            &conn,
            &nt,
            UpdateType::CipherUpdate,
        )?
        update_cipher_from_data(&mut saved_cipher, cipher_data, &headers, false, &conn, &nt, UpdateType::CipherUpdate)?
    }

    // Update user data
@@ -612,7 +603,7 @@ struct PreloginData {
}

#[post("/accounts/prelogin", data = "<data>")]
fn prelogin(data: JsonUpcase<PreloginData>, conn: DbConn) -> JsonResult {
fn prelogin(data: JsonUpcase<PreloginData>, conn: DbConn) -> Json<Value> {
    let data: PreloginData = data.into_inner().data;

    let (kdf_type, kdf_iter) = match User::find_by_mail(&data.Email, &conn) {
@@ -620,10 +611,10 @@ fn prelogin(data: JsonUpcase<PreloginData>, conn: DbConn) -> JsonResult {
        None => (User::CLIENT_KDF_TYPE_DEFAULT, User::CLIENT_KDF_ITER_DEFAULT),
    };

    Ok(Json(json!({
    Json(json!({
        "Kdf": kdf_type,
        "KdfIterations": kdf_iter
    })))
    }))
}
#[derive(Deserialize)]
#[allow(non_snake_case)]
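The prelogin endpoint is easy to poke at with curl (host and email illustrative; no authentication is required for this route, and the exact output values depend on the user's KDF settings):

    curl -s -X POST https://vault.example.com/api/accounts/prelogin \
        -H "Content-Type: application/json" \
        -d '{"Email": "user@example.com"}'
    # -> {"Kdf": 0, "KdfIterations": 100000}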
@@ -13,7 +13,7 @@ use crate::{
    api::{self, EmptyResult, JsonResult, JsonUpcase, Notify, PasswordData, UpdateType},
    auth::Headers,
    crypto,
    db::{models::*, DbConn},
    db::{models::*, DbConn, DbPool},
    CONFIG,
};

@@ -25,7 +25,7 @@ pub fn routes() -> Vec<Route> {
    // whether the user is an owner/admin of the relevant org, and if so,
    // allows the operation unconditionally.
    //
    // bitwarden_rs factors in the org owner/admin status as part of
    // vaultwarden factors in the org owner/admin status as part of
    // determining the write accessibility of a cipher, so most
    // admin/non-admin implementations can be shared.
    routes![
@@ -77,6 +77,15 @@ pub fn routes() -> Vec<Route> {
    ]
}

pub fn purge_trashed_ciphers(pool: DbPool) {
    debug!("Purging trashed ciphers");
    if let Ok(conn) = pool.get() {
        Cipher::purge_trash(&conn);
    } else {
        error!("Failed to get DB connection while purging trashed ciphers")
    }
}

#[derive(FromForm, Default)]
struct SyncData {
    #[form(field = "excludeDomains")]
@@ -84,55 +93,57 @@ struct SyncData {
}

#[get("/sync?<data..>")]
fn sync(data: Form<SyncData>, headers: Headers, conn: DbConn) -> JsonResult {
fn sync(data: Form<SyncData>, headers: Headers, conn: DbConn) -> Json<Value> {
    let user_json = headers.user.to_json(&conn);

    let folders = Folder::find_by_user(&headers.user.uuid, &conn);
    let folders_json: Vec<Value> = folders.iter().map(Folder::to_json).collect();

    let collections = Collection::find_by_user_uuid(&headers.user.uuid, &conn);
    let collections_json: Vec<Value> = collections.iter().map(Collection::to_json).collect();
    let collections_json: Vec<Value> =
        collections.iter().map(|c| c.to_json_details(&headers.user.uuid, &conn)).collect();

    let policies = OrgPolicy::find_by_user(&headers.user.uuid, &conn);
    let policies_json: Vec<Value> = policies.iter().map(OrgPolicy::to_json).collect();

    let ciphers = Cipher::find_by_user_visible(&headers.user.uuid, &conn);
    let ciphers_json: Vec<Value> = ciphers
        .iter()
        .map(|c| c.to_json(&headers.host, &headers.user.uuid, &conn))
        .collect();
    let ciphers_json: Vec<Value> =
        ciphers.iter().map(|c| c.to_json(&headers.host, &headers.user.uuid, &conn)).collect();

    let sends = Send::find_by_user(&headers.user.uuid, &conn);
    let sends_json: Vec<Value> = sends.iter().map(|s| s.to_json()).collect();

    let domains_json = if data.exclude_domains {
        Value::Null
    } else {
        api::core::_get_eq_domains(headers, true).unwrap().into_inner()
        api::core::_get_eq_domains(headers, true).into_inner()
    };

    Ok(Json(json!({
    Json(json!({
        "Profile": user_json,
        "Folders": folders_json,
        "Collections": collections_json,
        "Policies": policies_json,
        "Ciphers": ciphers_json,
        "Domains": domains_json,
        "Sends": sends_json,
        "unofficialServer": true,
        "Object": "sync"
    })))
    }))
}
#[get("/ciphers")]
|
||||
fn get_ciphers(headers: Headers, conn: DbConn) -> JsonResult {
|
||||
fn get_ciphers(headers: Headers, conn: DbConn) -> Json<Value> {
|
||||
let ciphers = Cipher::find_by_user_visible(&headers.user.uuid, &conn);
|
||||
|
||||
let ciphers_json: Vec<Value> = ciphers
|
||||
.iter()
|
||||
.map(|c| c.to_json(&headers.host, &headers.user.uuid, &conn))
|
||||
.collect();
|
||||
let ciphers_json: Vec<Value> =
|
||||
ciphers.iter().map(|c| c.to_json(&headers.host, &headers.user.uuid, &conn)).collect();
|
||||
|
||||
Ok(Json(json!({
|
||||
Json(json!({
|
||||
"Data": ciphers_json,
|
||||
"Object": "list",
|
||||
"ContinuationToken": null
|
||||
})))
|
||||
}))
|
||||
}
|
||||
|
||||
#[get("/ciphers/<uuid>")]
|
||||
@@ -225,6 +236,17 @@ fn post_ciphers_admin(data: JsonUpcase<ShareCipherData>, headers: Headers, conn:
|
||||
fn post_ciphers_create(data: JsonUpcase<ShareCipherData>, headers: Headers, conn: DbConn, nt: Notify) -> JsonResult {
|
||||
let mut data: ShareCipherData = data.into_inner().data;
|
||||
|
||||
// Check if there are one more more collections selected when this cipher is part of an organization.
|
||||
// err if this is not the case before creating an empty cipher.
|
||||
if data.Cipher.OrganizationId.is_some() && data.CollectionIds.is_empty() {
|
||||
err!("You must select at least one collection.");
|
||||
}
|
||||
|
||||
// This check is usually only needed in update_cipher_from_data(), but we
|
||||
// need it here as well to avoid creating an empty cipher in the call to
|
||||
// cipher.save() below.
|
||||
enforce_personal_ownership_policy(&data.Cipher, &headers, &conn)?;
|
||||
|
||||
let mut cipher = Cipher::new(data.Cipher.Type, data.Cipher.Name.clone());
|
||||
cipher.user_uuid = Some(headers.user.uuid.clone());
|
||||
cipher.save(&conn)?;
|
||||
@@ -251,6 +273,24 @@ fn post_ciphers(data: JsonUpcase<CipherData>, headers: Headers, conn: DbConn, nt
|
||||
Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, &conn)))
|
||||
}
|
||||
|
||||
/// Enforces the personal ownership policy on user-owned ciphers, if applicable.
|
||||
/// A non-owner/admin user belonging to an org with the personal ownership policy
|
||||
/// enabled isn't allowed to create new user-owned ciphers or modify existing ones
|
||||
/// (that were created before the policy was applicable to the user). The user is
|
||||
/// allowed to delete or share such ciphers to an org, however.
|
||||
///
|
||||
/// Ref: https://bitwarden.com/help/article/policies/#personal-ownership
|
||||
fn enforce_personal_ownership_policy(data: &CipherData, headers: &Headers, conn: &DbConn) -> EmptyResult {
|
||||
if data.OrganizationId.is_none() {
|
||||
let user_uuid = &headers.user.uuid;
|
||||
let policy_type = OrgPolicyType::PersonalOwnership;
|
||||
if OrgPolicy::is_applicable_to_user(user_uuid, policy_type, conn) {
|
||||
err!("Due to an Enterprise Policy, you are restricted from saving items to your personal vault.")
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn update_cipher_from_data(
    cipher: &mut Cipher,
    data: CipherData,
@@ -260,13 +300,16 @@ pub fn update_cipher_from_data(
    nt: &Notify,
    ut: UpdateType,
) -> EmptyResult {
    enforce_personal_ownership_policy(&data, headers, conn)?;

    // Check that the client isn't updating an existing cipher with stale data.
    if let Some(dt) = data.LastKnownRevisionDate {
        match NaiveDateTime::parse_from_str(&dt, "%+") { // ISO 8601 format
            Err(err) =>
                warn!("Error parsing LastKnownRevisionDate '{}': {}", dt, err),
            Ok(dt) if cipher.updated_at.signed_duration_since(dt).num_seconds() > 1 =>
                err!("The client copy of this cipher is out of date. Resync the client and try again."),
        match NaiveDateTime::parse_from_str(&dt, "%+") {
            // ISO 8601 format
            Err(err) => warn!("Error parsing LastKnownRevisionDate '{}': {}", dt, err),
            Ok(dt) if cipher.updated_at.signed_duration_since(dt).num_seconds() > 1 => {
                err!("The client copy of this cipher is out of date. Resync the client and try again.")
            }
            Ok(_) => (),
        }
    }
@@ -284,6 +327,11 @@ pub fn update_cipher_from_data(
        || cipher.is_write_accessible_to_user(&headers.user.uuid, &conn)
    {
        cipher.organization_uuid = Some(org_id);
        // After some discussion in PR #1329, the user_uuid = None was re-added here.
        // TODO: Audit/check the whole save/update cipher chain.
        // Upstream uses the user_uuid to allow a cipher added by a user to an org to still be viewable/editable by that user,
        // even when the user has hide-passwords configured as their policy.
        // Removing the line below would fix that, but we have to check what effect this would have on the rest of the code.
        cipher.user_uuid = None;
    } else {
        err!("You don't have permission to add cipher directly to organization")
@@ -327,6 +375,20 @@ pub fn update_cipher_from_data(
        }
    }

    // Clean up cipher data, e.g. by removing the 'Response' key.
    // This key is generated somewhere in the JavaScript client, so there is no way for us to fix this at the source.
    // Also, upstream only retrieves the keys they actually want to store, and thus skips the 'Response' key.
    // We do not mind which data is in it; this keeps our model more flexible when there are upstream changes.
    // But we at least know we do not need to store and return this specific key.
    fn _clean_cipher_data(mut json_data: Value) -> Value {
        if json_data.is_array() {
            json_data.as_array_mut().unwrap().iter_mut().for_each(|ref mut f| {
                f.as_object_mut().unwrap().remove("Response");
            });
        };
        json_data
    }
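Conceptually, that cleanup mirrors this jq pipeline (sample JSON illustrative):

    echo '{"Uris": [{"Uri": "https://example.com", "Response": null}]}' \
        | jq 'if .Uris then .Uris |= map(del(.Response)) else . end'
    # -> {"Uris": [{"Uri": "https://example.com"}]}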
    let type_data_opt = match data.Type {
        1 => data.Login,
        2 => data.SecureNote,
@@ -335,23 +397,22 @@ pub fn update_cipher_from_data(
        _ => err!("Invalid type"),
    };

    let mut type_data = match type_data_opt {
        Some(data) => data,
    let type_data = match type_data_opt {
        Some(mut data) => {
            // Remove the 'Response' key from the base object.
            data.as_object_mut().unwrap().remove("Response");
            // Remove the 'Response' key from every Uri.
            if data["Uris"].is_array() {
                data["Uris"] = _clean_cipher_data(data["Uris"].clone());
            }
            data
        }
        None => err!("Data missing"),
    };

    // TODO: ******* Backwards compat start **********
    // To remove backwards compatibility, just delete this code,
    // and remove the compat code from cipher::to_json
    type_data["Name"] = Value::String(data.Name.clone());
    type_data["Notes"] = data.Notes.clone().map(Value::String).unwrap_or(Value::Null);
    type_data["Fields"] = data.Fields.clone().unwrap_or(Value::Null);
    type_data["PasswordHistory"] = data.PasswordHistory.clone().unwrap_or(Value::Null);
    // TODO: ******* Backwards compat end **********

    cipher.name = data.Name;
    cipher.notes = data.Notes;
    cipher.fields = data.Fields.map(|f| f.to_string());
    cipher.fields = data.Fields.map(|f| _clean_cipher_data(f).to_string());
    cipher.data = type_data.to_string();
    cipher.password_history = data.PasswordHistory.map(|f| f.to_string());
@@ -526,11 +587,8 @@ fn post_collections_admin(
|
||||
}
|
||||
|
||||
let posted_collections: HashSet<String> = data.CollectionIds.iter().cloned().collect();
|
||||
let current_collections: HashSet<String> = cipher
|
||||
.get_collections(&headers.user.uuid, &conn)
|
||||
.iter()
|
||||
.cloned()
|
||||
.collect();
|
||||
let current_collections: HashSet<String> =
|
||||
cipher.get_collections(&headers.user.uuid, &conn).iter().cloned().collect();
|
||||
|
||||
for collection in posted_collections.symmetric_difference(¤t_collections) {
|
||||
match Collection::find_by_uuid(&collection, &conn) {
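The `symmetric_difference` call visits every collection that appears in exactly one of the two sets, which covers both newly posted and removed collections in a single loop. A small self-contained sketch with placeholder ids:

```rust
use std::collections::HashSet;

fn main() {
    let posted: HashSet<String> = ["col-a", "col-b"].iter().map(|s| s.to_string()).collect();
    let current: HashSet<String> = ["col-b", "col-c"].iter().map(|s| s.to_string()).collect();

    // "col-a" must be linked to the cipher, "col-c" unlinked; "col-b" is untouched.
    for collection in posted.symmetric_difference(&current) {
        println!("{}", collection);
    }
}
```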

@@ -766,24 +824,25 @@ fn post_attachment(
    let file_name = HEXLOWER.encode(&crypto::get_random(vec![0; 10]));
    let path = base_path.join(&file_name);

    let size = match field.data.save().memory_threshold(0).size_limit(size_limit).with_path(path.clone()) {
        SaveResult::Full(SavedData::File(_, size)) => size as i32,
        SaveResult::Full(other) => {
            std::fs::remove_file(path).ok();
            error = Some(format!("Attachment is not a file: {:?}", other));
            return;
        }
        SaveResult::Partial(_, reason) => {
            std::fs::remove_file(path).ok();
            error = Some(format!("Attachment size limit exceeded with this file: {:?}", reason));
            return;
        }
        SaveResult::Error(e) => {
            std::fs::remove_file(path).ok();
            error = Some(format!("Error: {:?}", e));
            return;
        }
    };
    let size =
        match field.data.save().memory_threshold(0).size_limit(size_limit).with_path(path.clone()) {
            SaveResult::Full(SavedData::File(_, size)) => size as i32,
            SaveResult::Full(other) => {
                std::fs::remove_file(path).ok();
                error = Some(format!("Attachment is not a file: {:?}", other));
                return;
            }
            SaveResult::Partial(_, reason) => {
                std::fs::remove_file(path).ok();
                error = Some(format!("Attachment size limit exceeded with this file: {:?}", reason));
                return;
            }
            SaveResult::Error(e) => {
                std::fs::remove_file(path).ok();
                error = Some(format!("Error: {:?}", e));
                return;
            }
        };

    let mut attachment = Attachment::new(file_name, cipher.uuid.clone(), name, size);
    attachment.akey = attachment_key.clone();

@@ -918,28 +977,38 @@ fn delete_cipher_selected_admin(data: JsonUpcase<Value>, headers: Headers, conn:
}

#[post("/ciphers/delete-admin", data = "<data>")]
fn delete_cipher_selected_post_admin(data: JsonUpcase<Value>, headers: Headers, conn: DbConn, nt: Notify) -> EmptyResult {
fn delete_cipher_selected_post_admin(
    data: JsonUpcase<Value>,
    headers: Headers,
    conn: DbConn,
    nt: Notify,
) -> EmptyResult {
    delete_cipher_selected_post(data, headers, conn, nt)
}

#[put("/ciphers/delete-admin", data = "<data>")]
fn delete_cipher_selected_put_admin(data: JsonUpcase<Value>, headers: Headers, conn: DbConn, nt: Notify) -> EmptyResult {
fn delete_cipher_selected_put_admin(
    data: JsonUpcase<Value>,
    headers: Headers,
    conn: DbConn,
    nt: Notify,
) -> EmptyResult {
    delete_cipher_selected_put(data, headers, conn, nt)
}

#[put("/ciphers/<uuid>/restore")]
fn restore_cipher_put(uuid: String, headers: Headers, conn: DbConn, nt: Notify) -> EmptyResult {
fn restore_cipher_put(uuid: String, headers: Headers, conn: DbConn, nt: Notify) -> JsonResult {
    _restore_cipher_by_uuid(&uuid, &headers, &conn, &nt)
}

#[put("/ciphers/<uuid>/restore-admin")]
fn restore_cipher_put_admin(uuid: String, headers: Headers, conn: DbConn, nt: Notify) -> EmptyResult {
fn restore_cipher_put_admin(uuid: String, headers: Headers, conn: DbConn, nt: Notify) -> JsonResult {
    _restore_cipher_by_uuid(&uuid, &headers, &conn, &nt)
}

#[put("/ciphers/restore", data = "<data>")]
fn restore_cipher_selected(data: JsonUpcase<Value>, headers: Headers, conn: DbConn, nt: Notify) -> EmptyResult {
    _restore_multiple_ciphers(data, headers, conn, nt)
fn restore_cipher_selected(data: JsonUpcase<Value>, headers: Headers, conn: DbConn, nt: Notify) -> JsonResult {
    _restore_multiple_ciphers(data, &headers, &conn, &nt)
}

#[derive(Deserialize)]
@@ -1025,7 +1094,6 @@ fn delete_all(
        Some(user_org) => {
            if user_org.atype == UserOrgType::Owner {
                Cipher::delete_all_by_organization(&org_data.org_id, &conn)?;
                Collection::delete_all_by_organization(&org_data.org_id, &conn)?;
                nt.send_user_update(UpdateType::Vault, &user);
                Ok(())
            } else {
@@ -1075,7 +1143,13 @@ fn _delete_cipher_by_uuid(uuid: &str, headers: &Headers, conn: &DbConn, soft_del
    Ok(())
}

fn _delete_multiple_ciphers(data: JsonUpcase<Value>, headers: Headers, conn: DbConn, soft_delete: bool, nt: Notify) -> EmptyResult {
fn _delete_multiple_ciphers(
    data: JsonUpcase<Value>,
    headers: Headers,
    conn: DbConn,
    soft_delete: bool,
    nt: Notify,
) -> EmptyResult {
    let data: Value = data.into_inner().data;

    let uuids = match data.get("Ids") {
@@ -1095,7 +1169,7 @@ fn _delete_multiple_ciphers(data: JsonUpcase<Value>, headers: Headers, conn: DbC
    Ok(())
}

fn _restore_cipher_by_uuid(uuid: &str, headers: &Headers, conn: &DbConn, nt: &Notify) -> EmptyResult {
fn _restore_cipher_by_uuid(uuid: &str, headers: &Headers, conn: &DbConn, nt: &Notify) -> JsonResult {
    let mut cipher = match Cipher::find_by_uuid(&uuid, &conn) {
        Some(cipher) => cipher,
        None => err!("Cipher doesn't exist"),
@@ -1109,10 +1183,10 @@ fn _restore_cipher_by_uuid(uuid: &str, headers: &Headers, conn: &DbConn, nt: &No
    cipher.save(&conn)?;

    nt.send_cipher_update(UpdateType::CipherUpdate, &cipher, &cipher.update_users_revision(&conn));
    Ok(())
    Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, &conn)))
}

fn _restore_multiple_ciphers(data: JsonUpcase<Value>, headers: Headers, conn: DbConn, nt: Notify) -> EmptyResult {
fn _restore_multiple_ciphers(data: JsonUpcase<Value>, headers: &Headers, conn: &DbConn, nt: &Notify) -> JsonResult {
    let data: Value = data.into_inner().data;

    let uuids = match data.get("Ids") {
@@ -1123,13 +1197,19 @@ fn _restore_multiple_ciphers(data: JsonUpcase<Value>, headers: Headers, conn: Db
        None => err!("Request missing ids field"),
    };

    let mut ciphers: Vec<Value> = Vec::new();
    for uuid in uuids {
        if let error @ Err(_) = _restore_cipher_by_uuid(uuid, &headers, &conn, &nt) {
            return error;
        };
        match _restore_cipher_by_uuid(uuid, headers, conn, nt) {
            Ok(json) => ciphers.push(json.into_inner()),
            err => return err,
        }
    }

    Ok(())
    Ok(Json(json!({
        "Data": ciphers,
        "Object": "list",
        "ContinuationToken": null
    })))
}
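The restore endpoints now return the Bitwarden "list" envelope instead of an empty body. A sketch of that shape, using dummy cipher objects in place of the real `to_json` output:

```rust
use serde_json::json;

fn main() {
    let ciphers = vec![json!({ "Id": "uuid-1" }), json!({ "Id": "uuid-2" })];
    let body = json!({
        "Data": ciphers,
        "Object": "list",
        "ContinuationToken": null,
    });
    println!("{}", body);
}
```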

fn _delete_cipher_attachment_by_id(

src/api/core/folders.rs
@@ -8,28 +8,20 @@ use crate::{
};

pub fn routes() -> Vec<rocket::Route> {
    routes![
        get_folders,
        get_folder,
        post_folders,
        post_folder,
        put_folder,
        delete_folder_post,
        delete_folder,
    ]
    routes![get_folders, get_folder, post_folders, post_folder, put_folder, delete_folder_post, delete_folder,]
}

#[get("/folders")]
fn get_folders(headers: Headers, conn: DbConn) -> JsonResult {
fn get_folders(headers: Headers, conn: DbConn) -> Json<Value> {
    let folders = Folder::find_by_user(&headers.user.uuid, &conn);

    let folders_json: Vec<Value> = folders.iter().map(Folder::to_json).collect();

    Ok(Json(json!({
    Json(json!({
        "Data": folders_json,
        "Object": "list",
        "ContinuationToken": null,
    })))
    }))
}
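Several handlers in this changeset switch from `JsonResult` to a plain `Json<Value>` return type. The sketch below uses stand-in types (an assumption; the real `JsonResult` is the project's alias built on Rocket's `Json`) to show why the change matters: an infallible handler no longer wraps its response in a `Result` that can never be `Err`.

```rust
use serde_json::{json, Value};

// Stand-ins for the project's types (assumption: JsonResult ~ Result<Json<Value>, Error>).
struct Json<T>(T);
type JsonResult = Result<Json<Value>, String>;

// Before: callers must handle an error case that cannot occur.
fn get_list_fallible() -> JsonResult {
    Ok(Json(json!({ "Object": "list" })))
}

// After: the signature documents that the handler cannot fail.
fn get_list_infallible() -> Json<Value> {
    Json(json!({ "Object": "list" }))
}

fn main() {
    let _ = get_list_fallible().map(|j| j.0);
    let _ = get_list_infallible().0;
}
```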

#[get("/folders/<uuid>")]

src/api/core/mod.rs
@@ -2,17 +2,15 @@ mod accounts;
mod ciphers;
mod folders;
mod organizations;
mod sends;
pub mod two_factor;

pub use ciphers::purge_trashed_ciphers;
pub use sends::purge_sends;

pub fn routes() -> Vec<Route> {
    let mut mod_routes = routes![
        clear_device_token,
        put_device_token,
        get_eq_domains,
        post_eq_domains,
        put_eq_domains,
        hibp_breach,
    ];
    let mut mod_routes =
        routes![clear_device_token, put_device_token, get_eq_domains, post_eq_domains, put_eq_domains, hibp_breach,];

    let mut routes = Vec::new();
    routes.append(&mut accounts::routes());
@@ -20,6 +18,7 @@ pub fn routes() -> Vec<Route> {
    routes.append(&mut folders::routes());
    routes.append(&mut organizations::routes());
    routes.append(&mut two_factor::routes());
    routes.append(&mut sends::routes());
    routes.append(&mut mod_routes);

    routes
@@ -28,19 +27,21 @@ pub fn routes() -> Vec<Route> {
//
// Move this somewhere else
//
use rocket::response::Response;
use rocket::Route;
use rocket_contrib::json::Json;
use serde_json::Value;

use crate::{
    api::{EmptyResult, JsonResult, JsonUpcase},
    api::{JsonResult, JsonUpcase},
    auth::Headers,
    db::DbConn,
    error::Error,
    util::get_reqwest_client,
};

#[put("/devices/identifier/<uuid>/clear-token")]
fn clear_device_token(uuid: String) -> EmptyResult {
fn clear_device_token<'a>(uuid: String) -> Response<'a> {
    // This endpoint doesn't have an auth header

    let _ = uuid;
@@ -49,11 +50,11 @@ fn clear_device_token(uuid: String) -> EmptyResult {
    // This only clears the push token
    // https://github.com/bitwarden/core/blob/master/src/Api/Controllers/DevicesController.cs#L109
    // https://github.com/bitwarden/core/blob/master/src/Core/Services/Implementations/DeviceService.cs#L37
    Ok(())
    Response::new()
}

#[put("/devices/identifier/<uuid>/token", data = "<data>")]
fn put_device_token(uuid: String, data: JsonUpcase<Value>, headers: Headers) -> JsonResult {
fn put_device_token(uuid: String, data: JsonUpcase<Value>, headers: Headers) -> Json<Value> {
    let _data: Value = data.into_inner().data;
    // Data has a single string value "PushToken"
    let _ = uuid;
@@ -61,13 +62,13 @@ fn put_device_token(uuid: String, data: JsonUpcase<Value>, headers: Headers) ->

    // TODO: This should save the push token, but we don't have push functionality

    Ok(Json(json!({
    Json(json!({
        "Id": headers.device.uuid,
        "Name": headers.device.name,
        "Type": headers.device.atype,
        "Identifier": headers.device.uuid,
        "CreationDate": crate::util::format_date(&headers.device.created_at),
    })))
    }))
}

#[derive(Serialize, Deserialize, Debug)]
@@ -81,11 +82,11 @@ struct GlobalDomain {
const GLOBAL_DOMAINS: &str = include_str!("../../static/global_domains.json");

#[get("/settings/domains")]
fn get_eq_domains(headers: Headers) -> JsonResult {
fn get_eq_domains(headers: Headers) -> Json<Value> {
    _get_eq_domains(headers, false)
}

fn _get_eq_domains(headers: Headers, no_excluded: bool) -> JsonResult {
fn _get_eq_domains(headers: Headers, no_excluded: bool) -> Json<Value> {
    let user = headers.user;
    use serde_json::from_str;

@@ -102,11 +103,11 @@ fn _get_eq_domains(headers: Headers, no_excluded: bool) -> JsonResult {
        globals.retain(|g| !g.Excluded);
    }

    Ok(Json(json!({
    Json(json!({
        "EquivalentDomains": equivalent_domains,
        "GlobalEquivalentDomains": globals,
        "Object": "domains",
    })))
    }))
}

#[derive(Deserialize, Debug)]
@@ -141,22 +142,15 @@ fn put_eq_domains(data: JsonUpcase<EquivDomainData>, headers: Headers, conn: DbC

#[get("/hibp/breach?<username>")]
fn hibp_breach(username: String) -> JsonResult {
    let user_agent = "Bitwarden_RS";
    let url = format!(
        "https://haveibeenpwned.com/api/v3/breachedaccount/{}?truncateResponse=false&includeUnverified=false",
        username
    );

    use reqwest::{blocking::Client, header::USER_AGENT};

    if let Some(api_key) = crate::CONFIG.hibp_api_key() {
        let hibp_client = Client::builder().build()?;
        let hibp_client = get_reqwest_client();

        let res = hibp_client
            .get(&url)
            .header(USER_AGENT, user_agent)
            .header("hibp-api-key", api_key)
            .send()?;
        let res = hibp_client.get(&url).header("hibp-api-key", api_key).send()?;
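A standalone sketch of the HIBP lookup with reqwest's blocking client; the username and API key are placeholders, and the crate feature flag is an assumption about the build configuration rather than something stated in the diff.

```rust
// Assumed Cargo.toml entry: reqwest = { version = "0.11", features = ["blocking"] }
use reqwest::blocking::Client;

fn main() -> Result<(), reqwest::Error> {
    let client = Client::builder().user_agent("hibp-example").build()?;
    let url = "https://haveibeenpwned.com/api/v3/breachedaccount/user@example.com?truncateResponse=false";

    let res = client.get(url).header("hibp-api-key", "<your key>").send()?;

    // HIBP answers 404 when the account does not appear in any breach.
    println!("status: {}", res.status());
    Ok(())
}
```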

        // If we get a 404, return a 404; it means no breached accounts
        if res.status() == 404 {
@@ -172,7 +166,7 @@ fn hibp_breach(username: String) -> JsonResult {
            "Domain": "haveibeenpwned.com",
            "BreachDate": "2019-08-18T00:00:00Z",
            "AddedDate": "2019-08-18T00:00:00Z",
            "Description": format!("Go to: <a href=\"https://haveibeenpwned.com/account/{account}\" target=\"_blank\" rel=\"noopener\">https://haveibeenpwned.com/account/{account}</a> for a manual check.<br/><br/>HaveIBeenPwned API key not set!<br/>Go to <a href=\"https://haveibeenpwned.com/API/Key\" target=\"_blank\" rel=\"noopener\">https://haveibeenpwned.com/API/Key</a> to purchase an API key from HaveIBeenPwned.<br/><br/>", account=username),
            "Description": format!("Go to: <a href=\"https://haveibeenpwned.com/account/{account}\" target=\"_blank\" rel=\"noreferrer\">https://haveibeenpwned.com/account/{account}</a> for a manual check.<br/><br/>HaveIBeenPwned API key not set!<br/>Go to <a href=\"https://haveibeenpwned.com/API/Key\" target=\"_blank\" rel=\"noreferrer\">https://haveibeenpwned.com/API/Key</a> to purchase an API key from HaveIBeenPwned.<br/><br/>", account=username),
            "LogoPath": "bwrs_static/hibp.png",
            "PwnCount": 0,
            "DataClasses": [

src/api/core/organizations.rs
@@ -5,7 +5,7 @@ use serde_json::Value;

use crate::{
    api::{EmptyResult, JsonResult, JsonUpcase, JsonUpcaseVec, Notify, NumberOrString, PasswordData, UpdateType},
    auth::{decode_invite, AdminHeaders, Headers, OwnerHeaders, ManagerHeaders, ManagerHeadersLoose},
    auth::{decode_invite, AdminHeaders, Headers, ManagerHeaders, ManagerHeadersLoose, OwnerHeaders},
    db::{models::*, DbConn},
    mail, CONFIG,
};
@@ -47,7 +47,10 @@ pub fn routes() -> Vec<Route> {
        list_policies_token,
        get_policy,
        put_policy,
        get_organization_tax,
        get_plans,
        get_plans_tax_rates,
        import,
    ]
}

@@ -189,8 +192,8 @@ fn post_organization(

// GET /api/collections?writeOnly=false
#[get("/collections")]
fn get_user_collections(headers: Headers, conn: DbConn) -> JsonResult {
    Ok(Json(json!({
fn get_user_collections(headers: Headers, conn: DbConn) -> Json<Value> {
    Json(json!({
        "Data":
            Collection::find_by_user_uuid(&headers.user.uuid, &conn)
            .iter()
@@ -198,12 +201,12 @@ fn get_user_collections(headers: Headers, conn: DbConn) -> JsonResult {
            .collect::<Value>(),
        "Object": "list",
        "ContinuationToken": null,
    })))
    }))
}

#[get("/organizations/<org_id>/collections")]
fn get_org_collections(org_id: String, _headers: AdminHeaders, conn: DbConn) -> JsonResult {
    Ok(Json(json!({
fn get_org_collections(org_id: String, _headers: AdminHeaders, conn: DbConn) -> Json<Value> {
    Json(json!({
        "Data":
            Collection::find_by_organization(&org_id, &conn)
            .iter()
@@ -211,7 +214,7 @@ fn get_org_collections(org_id: String, _headers: AdminHeaders, conn: DbConn) ->
            .collect::<Value>(),
        "Object": "list",
        "ContinuationToken": null,
    })))
    }))
}

#[post("/organizations/<org_id>/collections", data = "<data>")]
@@ -330,7 +333,12 @@ fn post_organization_collection_delete_user(
}

#[delete("/organizations/<org_id>/collections/<col_id>")]
fn delete_organization_collection(org_id: String, col_id: String, _headers: ManagerHeaders, conn: DbConn) -> EmptyResult {
fn delete_organization_collection(
    org_id: String,
    col_id: String,
    _headers: ManagerHeaders,
    conn: DbConn,
) -> EmptyResult {
    match Collection::find_by_uuid(&col_id, &conn) {
        None => err!("Collection not found"),
        Some(collection) => {
@@ -423,9 +431,7 @@ fn put_collection_users(
            continue;
        }

        CollectionUser::save(&user.user_uuid, &coll_id,
                             d.ReadOnly, d.HidePasswords,
                             &conn)?;
        CollectionUser::save(&user.user_uuid, &coll_id, d.ReadOnly, d.HidePasswords, &conn)?;
    }

    Ok(())
@@ -438,30 +444,28 @@ struct OrgIdData {
}

#[get("/ciphers/organization-details?<data..>")]
fn get_org_details(data: Form<OrgIdData>, headers: Headers, conn: DbConn) -> JsonResult {
fn get_org_details(data: Form<OrgIdData>, headers: Headers, conn: DbConn) -> Json<Value> {
    let ciphers = Cipher::find_by_org(&data.organization_id, &conn);
    let ciphers_json: Vec<Value> = ciphers
        .iter()
        .map(|c| c.to_json(&headers.host, &headers.user.uuid, &conn))
        .collect();
    let ciphers_json: Vec<Value> =
        ciphers.iter().map(|c| c.to_json(&headers.host, &headers.user.uuid, &conn)).collect();

    Ok(Json(json!({
    Json(json!({
        "Data": ciphers_json,
        "Object": "list",
        "ContinuationToken": null,
    })))
    }))
}

#[get("/organizations/<org_id>/users")]
fn get_org_users(org_id: String, _headers: ManagerHeadersLoose, conn: DbConn) -> JsonResult {
fn get_org_users(org_id: String, _headers: ManagerHeadersLoose, conn: DbConn) -> Json<Value> {
    let users = UserOrganization::find_by_org(&org_id, &conn);
    let users_json: Vec<Value> = users.iter().map(|c| c.to_json_user_details(&conn)).collect();

    Ok(Json(json!({
    Json(json!({
        "Data": users_json,
        "Object": "list",
        "ContinuationToken": null,
    })))
    }))
}

#[derive(Deserialize)]
@@ -541,9 +545,7 @@ fn send_invite(org_id: String, data: JsonUpcase<InviteData>, headers: AdminHeade
                match Collection::find_by_uuid_and_org(&col.Id, &org_id, &conn) {
                    None => err!("Collection not found in Organization"),
                    Some(collection) => {
                        CollectionUser::save(&user.uuid, &collection.uuid,
                                             col.ReadOnly, col.HidePasswords,
                                             &conn)?;
                        CollectionUser::save(&user.uuid, &collection.uuid, col.ReadOnly, col.HidePasswords, &conn)?;
                    }
                }
            }
@@ -652,7 +654,7 @@ fn accept_invite(_org_id: String, _org_user_id: String, data: JsonUpcase<AcceptD
    }

    if CONFIG.mail_enabled() {
        let mut org_name = String::from("bitwarden_rs");
        let mut org_name = CONFIG.invitation_org_name();
        if let Some(org_id) = &claims.org_id {
            org_name = match Organization::find_by_uuid(&org_id, &conn) {
                Some(org) => org.name,
@@ -798,9 +800,13 @@ fn edit_user(
                match Collection::find_by_uuid_and_org(&col.Id, &org_id, &conn) {
                    None => err!("Collection not found in Organization"),
                    Some(collection) => {
                        CollectionUser::save(&user_to_edit.user_uuid, &collection.uuid,
                                             col.ReadOnly, col.HidePasswords,
                                             &conn)?;
                        CollectionUser::save(
                            &user_to_edit.user_uuid,
                            &collection.uuid,
                            col.ReadOnly,
                            col.HidePasswords,
                            &conn,
                        )?;
                    }
                }
            }
@@ -896,16 +902,8 @@ fn post_org_import(
        .into_iter()
        .map(|cipher_data| {
            let mut cipher = Cipher::new(cipher_data.Type, cipher_data.Name.clone());
            update_cipher_from_data(
                &mut cipher,
                cipher_data,
                &headers,
                false,
                &conn,
                &nt,
                UpdateType::CipherCreate,
            )
            .ok();
            update_cipher_from_data(&mut cipher, cipher_data, &headers, false, &conn, &nt, UpdateType::CipherCreate)
                .ok();
            cipher
        })
        .collect();
@@ -927,15 +925,15 @@ fn post_org_import(
}

#[get("/organizations/<org_id>/policies")]
fn list_policies(org_id: String, _headers: AdminHeaders, conn: DbConn) -> JsonResult {
fn list_policies(org_id: String, _headers: AdminHeaders, conn: DbConn) -> Json<Value> {
    let policies = OrgPolicy::find_by_org(&org_id, &conn);
    let policies_json: Vec<Value> = policies.iter().map(OrgPolicy::to_json).collect();

    Ok(Json(json!({
    Json(json!({
        "Data": policies_json,
        "Object": "list",
        "ContinuationToken": null
    })))
    }))
}

#[get("/organizations/<org_id>/policies/token?<token>")]
@@ -966,7 +964,7 @@ fn list_policies_token(org_id: String, token: String, conn: DbConn) -> JsonResul
fn get_policy(org_id: String, pol_type: i32, _headers: AdminHeaders, conn: DbConn) -> JsonResult {
    let pol_type_enum = match OrgPolicyType::from_i32(pol_type) {
        Some(pt) => pt,
        None => err!("Invalid policy type"),
        None => err!("Invalid or unsupported policy type"),
    };

    let policy = match OrgPolicy::find_by_org_and_type(&org_id, pol_type, &conn) {
@@ -986,7 +984,13 @@ struct PolicyData {
}

#[put("/organizations/<org_id>/policies/<pol_type>", data = "<data>")]
fn put_policy(org_id: String, pol_type: i32, data: Json<PolicyData>, _headers: AdminHeaders, conn: DbConn) -> JsonResult {
fn put_policy(
    org_id: String,
    pol_type: i32,
    data: Json<PolicyData>,
    _headers: AdminHeaders,
    conn: DbConn,
) -> JsonResult {
    let data: PolicyData = data.into_inner();

    let pol_type_enum = match OrgPolicyType::from_i32(pol_type) {
@@ -1006,9 +1010,16 @@ fn put_policy(org_id: String, pol_type: i32, data: Json<PolicyData>, _headers: A
    Ok(Json(policy.to_json()))
}

#[allow(unused_variables)]
#[get("/organizations/<org_id>/tax")]
fn get_organization_tax(org_id: String, _headers: Headers, _conn: DbConn) -> EmptyResult {
    // Prevent a 404 error, which also causes Javascript errors.
    err!("Only allowed when not self hosted.")
}

#[get("/plans")]
fn get_plans(_headers: Headers, _conn: DbConn) -> JsonResult {
    Ok(Json(json!({
fn get_plans(_headers: Headers, _conn: DbConn) -> Json<Value> {
    Json(json!({
        "Object": "list",
        "Data": [
            {
@@ -1055,5 +1066,111 @@ fn get_plans(_headers: Headers, _conn: DbConn) -> JsonResult {
            }
        ],
        "ContinuationToken": null
    })))
}
    }))
}

#[get("/plans/sales-tax-rates")]
fn get_plans_tax_rates(_headers: Headers, _conn: DbConn) -> Json<Value> {
    // Prevent a 404 error, which also causes Javascript errors.
    Json(json!({
        "Object": "list",
        "Data": [],
        "ContinuationToken": null
    }))
}

#[derive(Deserialize, Debug)]
#[allow(non_snake_case)]
struct OrgImportGroupData {
    Name: String,       // "GroupName"
    ExternalId: String, // "cn=GroupName,ou=Groups,dc=example,dc=com"
    Users: Vec<String>, // ["uid=user,ou=People,dc=example,dc=com"]
}

#[derive(Deserialize, Debug)]
#[allow(non_snake_case)]
struct OrgImportUserData {
    Email: String,      // "user@maildomain.net"
    ExternalId: String, // "uid=user,ou=People,dc=example,dc=com"
    Deleted: bool,
}

#[derive(Deserialize, Debug)]
#[allow(non_snake_case)]
struct OrgImportData {
    Groups: Vec<OrgImportGroupData>,
    OverwriteExisting: bool,
    Users: Vec<OrgImportUserData>,
}

#[post("/organizations/<org_id>/import", data = "<data>")]
fn import(org_id: String, data: JsonUpcase<OrgImportData>, headers: Headers, conn: DbConn) -> EmptyResult {
    let data = data.into_inner().data;

    // TODO: Currently we aren't storing the externalIds anywhere, so we also don't have a way
    // to differentiate between auto-imported users and manually added ones.
    // This means that this endpoint can end up removing users that were added manually by an admin,
    // as opposed to upstream, which only removes auto-imported users.

    // The user needs to be an admin or owner to use the Directory Connector
    match UserOrganization::find_by_user_and_org(&headers.user.uuid, &org_id, &conn) {
        Some(user_org) if user_org.atype >= UserOrgType::Admin => { /* Okay, nothing to do */ }
        Some(_) => err!("User has insufficient permissions to use Directory Connector"),
        None => err!("User not part of organization"),
    };

    for user_data in &data.Users {
        if user_data.Deleted {
            // If user is marked for deletion and it exists, delete it
            if let Some(user_org) = UserOrganization::find_by_email_and_org(&user_data.Email, &org_id, &conn) {
                user_org.delete(&conn)?;
            }

        // If user is not part of the organization, but it exists
        } else if UserOrganization::find_by_email_and_org(&user_data.Email, &org_id, &conn).is_none() {
            if let Some(user) = User::find_by_mail(&user_data.Email, &conn) {
                let user_org_status = if CONFIG.mail_enabled() {
                    UserOrgStatus::Invited as i32
                } else {
                    UserOrgStatus::Accepted as i32 // Automatically mark user as accepted if no email invites
                };

                let mut new_org_user = UserOrganization::new(user.uuid.clone(), org_id.clone());
                new_org_user.access_all = false;
                new_org_user.atype = UserOrgType::User as i32;
                new_org_user.status = user_org_status;

                new_org_user.save(&conn)?;

                if CONFIG.mail_enabled() {
                    let org_name = match Organization::find_by_uuid(&org_id, &conn) {
                        Some(org) => org.name,
                        None => err!("Error looking up organization"),
                    };

                    mail::send_invite(
                        &user_data.Email,
                        &user.uuid,
                        Some(org_id.clone()),
                        Some(new_org_user.uuid),
                        &org_name,
                        Some(headers.user.email.clone()),
                    )?;
                }
            }
        }
    }

    // If this flag is enabled, any user that isn't provided in the Users list will be removed (by default they will be kept unless they have Deleted == true)
    if data.OverwriteExisting {
        for user_org in UserOrganization::find_by_org_and_type(&org_id, UserOrgType::User as i32, &conn) {
            if let Some(user_email) = User::find_by_uuid(&user_org.user_uuid, &conn).map(|u| u.email) {
                if !data.Users.iter().any(|u| u.Email == user_email) {
                    user_org.delete(&conn)?;
                }
            }
        }
    }

    Ok(())
}
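The `OverwriteExisting` pass boils down to a membership test against the posted user list. A sketch with plain arrays instead of database rows:

```rust
fn main() {
    let imported = ["a@example.com", "b@example.com"];
    let existing = ["a@example.com", "c@example.com"];

    // Any existing (non-admin) org user missing from the import payload is removed.
    for email in existing {
        if !imported.iter().any(|u| *u == email) {
            println!("removing {}", email);
        }
    }
}
```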

391
src/api/core/sends.rs
Normal file
@@ -0,0 +1,391 @@
use std::{io::Read, path::Path};

use chrono::{DateTime, Duration, Utc};
use multipart::server::{save::SavedData, Multipart, SaveResult};
use rocket::{http::ContentType, Data};
use rocket_contrib::json::Json;
use serde_json::Value;

use crate::{
    api::{ApiResult, EmptyResult, JsonResult, JsonUpcase, Notify, UpdateType},
    auth::{Headers, Host},
    db::{models::*, DbConn, DbPool},
    CONFIG,
};

const SEND_INACCESSIBLE_MSG: &str = "Send does not exist or is no longer available";

pub fn routes() -> Vec<rocket::Route> {
    routes![post_send, post_send_file, post_access, post_access_file, put_send, delete_send, put_remove_password]
}

pub fn purge_sends(pool: DbPool) {
    debug!("Purging sends");
    if let Ok(conn) = pool.get() {
        Send::purge(&conn);
    } else {
        error!("Failed to get DB connection while purging sends")
    }
}

#[derive(Deserialize)]
#[allow(non_snake_case)]
pub struct SendData {
    pub Type: i32,
    pub Key: String,
    pub Password: Option<String>,
    pub MaxAccessCount: Option<i32>,
    pub ExpirationDate: Option<DateTime<Utc>>,
    pub DeletionDate: DateTime<Utc>,
    pub Disabled: bool,

    // Data field
    pub Name: String,
    pub Notes: Option<String>,
    pub Text: Option<Value>,
    pub File: Option<Value>,
}

/// Enforces the `Disable Send` policy. A non-owner/admin user belonging to
/// an org with this policy enabled isn't allowed to create new Sends or
/// modify existing ones, but is allowed to delete them.
///
/// Ref: https://bitwarden.com/help/article/policies/#disable-send
fn enforce_disable_send_policy(headers: &Headers, conn: &DbConn) -> EmptyResult {
    let user_uuid = &headers.user.uuid;
    let policy_type = OrgPolicyType::DisableSend;
    if OrgPolicy::is_applicable_to_user(user_uuid, policy_type, conn) {
        err!("Due to an Enterprise Policy, you are only able to delete an existing Send.")
    }
    Ok(())
}

fn create_send(data: SendData, user_uuid: String) -> ApiResult<Send> {
    let data_val = if data.Type == SendType::Text as i32 {
        data.Text
    } else if data.Type == SendType::File as i32 {
        data.File
    } else {
        err!("Invalid Send type")
    };

    let data_str = if let Some(mut d) = data_val {
        d.as_object_mut().and_then(|o| o.remove("Response"));
        serde_json::to_string(&d)?
    } else {
        err!("Send data not provided");
    };

    if data.DeletionDate > Utc::now() + Duration::days(31) {
        err!(
            "You cannot have a Send with a deletion date that far into the future. Adjust the Deletion Date to a value less than 31 days from now and try again."
        );
    }

    let mut send = Send::new(data.Type, data.Name, data_str, data.Key, data.DeletionDate.naive_utc());
    send.user_uuid = Some(user_uuid);
    send.notes = data.Notes;
    send.max_access_count = data.MaxAccessCount;
    send.expiration_date = data.ExpirationDate.map(|d| d.naive_utc());
    send.disabled = data.Disabled;
    send.atype = data.Type;

    send.set_password(data.Password.as_deref());

    Ok(send)
}
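The 31-day ceiling on `DeletionDate` is easy to check in isolation; here is a sketch with chrono, using an arbitrary 45-day date to trigger the rejection:

```rust
use chrono::{Duration, Utc};

fn main() {
    let deletion_date = Utc::now() + Duration::days(45);

    // Mirrors the guard in create_send: Sends may live at most 31 days.
    if deletion_date > Utc::now() + Duration::days(31) {
        println!("rejected: deletion date too far into the future");
    }
}
```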

#[post("/sends", data = "<data>")]
fn post_send(data: JsonUpcase<SendData>, headers: Headers, conn: DbConn, nt: Notify) -> JsonResult {
    enforce_disable_send_policy(&headers, &conn)?;

    let data: SendData = data.into_inner().data;

    if data.Type == SendType::File as i32 {
        err!("File sends should use /api/sends/file")
    }

    let mut send = create_send(data, headers.user.uuid.clone())?;
    send.save(&conn)?;
    nt.send_user_update(UpdateType::SyncSendCreate, &headers.user);

    Ok(Json(send.to_json()))
}

#[post("/sends/file", format = "multipart/form-data", data = "<data>")]
fn post_send_file(data: Data, content_type: &ContentType, headers: Headers, conn: DbConn, nt: Notify) -> JsonResult {
    enforce_disable_send_policy(&headers, &conn)?;

    let boundary = content_type.params().next().expect("No boundary provided").1;

    let mut mpart = Multipart::with_body(data.open(), boundary);

    // First entry is the SendData JSON
    let mut model_entry = match mpart.read_entry()? {
        Some(e) if &*e.headers.name == "model" => e,
        Some(_) => err!("Invalid entry name"),
        None => err!("No model entry present"),
    };

    let mut buf = String::new();
    model_entry.data.read_to_string(&mut buf)?;
    let data = serde_json::from_str::<crate::util::UpCase<SendData>>(&buf)?;

    // Get the file length and add an extra 10% to avoid issues
    const SIZE_110_MB: u64 = 115_343_360;

    let size_limit = match CONFIG.user_attachment_limit() {
        Some(0) => err!("File uploads are disabled"),
        Some(limit_kb) => {
            let left = (limit_kb * 1024) - Attachment::size_by_user(&headers.user.uuid, &conn);
            if left <= 0 {
                err!("Attachment size limit reached! Delete some files to open space")
            }
            std::cmp::Ord::max(left as u64, SIZE_110_MB)
        }
        None => SIZE_110_MB,
    };

    // Create the Send
    let mut send = create_send(data.data, headers.user.uuid.clone())?;
    let file_id: String = data_encoding::HEXLOWER.encode(&crate::crypto::get_random(vec![0; 32]));

    if send.atype != SendType::File as i32 {
        err!("Send content is not a file");
    }

    let file_path = Path::new(&CONFIG.sends_folder()).join(&send.uuid).join(&file_id);

    // Read the data entry and save the file
    let mut data_entry = match mpart.read_entry()? {
        Some(e) if &*e.headers.name == "data" => e,
        Some(_) => err!("Invalid entry name"),
        None => err!("No model entry present"),
    };

    let size = match data_entry.data.save().memory_threshold(0).size_limit(size_limit).with_path(&file_path) {
        SaveResult::Full(SavedData::File(_, size)) => size as i32,
        SaveResult::Full(other) => {
            std::fs::remove_file(&file_path).ok();
            err!(format!("Attachment is not a file: {:?}", other));
        }
        SaveResult::Partial(_, reason) => {
            std::fs::remove_file(&file_path).ok();
            err!(format!("Attachment size limit exceeded with this file: {:?}", reason));
        }
        SaveResult::Error(e) => {
            std::fs::remove_file(&file_path).ok();
            err!(format!("Error: {:?}", e));
        }
    };

    // Set ID and sizes
    let mut data_value: Value = serde_json::from_str(&send.data)?;
    if let Some(o) = data_value.as_object_mut() {
        o.insert(String::from("Id"), Value::String(file_id));
        o.insert(String::from("Size"), Value::Number(size.into()));
        o.insert(String::from("SizeName"), Value::String(crate::util::get_display_size(size)));
    }
    send.data = serde_json::to_string(&data_value)?;

    // Save the changes in the database
    send.save(&conn)?;
    nt.send_user_update(UpdateType::SyncSendCreate, &headers.user);

    Ok(Json(send.to_json()))
}
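The size-limit arithmetic in `post_send_file` can be isolated as below; `limit_kb` and `used_bytes` are hypothetical inputs standing in for `CONFIG.user_attachment_limit()` and `Attachment::size_by_user(..)`, and the sketch mirrors the hunk above, including its use of `Ord::max` against the 110 MiB constant.

```rust
const SIZE_110_MB: u64 = 115_343_360; // 110 MiB: the 100 MiB base plus the extra 10%

fn size_limit(limit_kb: Option<i64>, used_bytes: i64) -> Result<u64, &'static str> {
    match limit_kb {
        Some(0) => Err("File uploads are disabled"),
        Some(limit_kb) => {
            let left = (limit_kb * 1024) - used_bytes;
            if left <= 0 {
                return Err("Attachment size limit reached");
            }
            Ok(std::cmp::Ord::max(left as u64, SIZE_110_MB))
        }
        None => Ok(SIZE_110_MB),
    }
}

fn main() {
    // A 200 MiB quota with 50 MiB already used leaves ~150 MiB.
    println!("{:?}", size_limit(Some(200 * 1024), 50 * 1024 * 1024));
}
```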

#[derive(Deserialize)]
#[allow(non_snake_case)]
pub struct SendAccessData {
    pub Password: Option<String>,
}

#[post("/sends/access/<access_id>", data = "<data>")]
fn post_access(access_id: String, data: JsonUpcase<SendAccessData>, conn: DbConn) -> JsonResult {
    let mut send = match Send::find_by_access_id(&access_id, &conn) {
        Some(s) => s,
        None => err_code!(SEND_INACCESSIBLE_MSG, 404),
    };

    if let Some(max_access_count) = send.max_access_count {
        if send.access_count >= max_access_count {
            err_code!(SEND_INACCESSIBLE_MSG, 404);
        }
    }

    if let Some(expiration) = send.expiration_date {
        if Utc::now().naive_utc() >= expiration {
            err_code!(SEND_INACCESSIBLE_MSG, 404)
        }
    }

    if Utc::now().naive_utc() >= send.deletion_date {
        err_code!(SEND_INACCESSIBLE_MSG, 404)
    }

    if send.disabled {
        err_code!(SEND_INACCESSIBLE_MSG, 404)
    }

    if send.password_hash.is_some() {
        match data.into_inner().data.Password {
            Some(ref p) if send.check_password(p) => { /* Nothing to do here */ }
            Some(_) => err!("Invalid password."),
            None => err_code!("Password not provided", 401),
        }
    }

    // For file Sends, the access count is incremented during the download
    if send.atype == SendType::Text as i32 {
        send.access_count += 1;
    }

    send.save(&conn)?;

    Ok(Json(send.to_json_access()))
}
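The access checks in `post_access` collapse to a single predicate; the sketch below reuses the same field names on a hypothetical struct-free signature:

```rust
use chrono::{Duration, NaiveDateTime, Utc};

fn is_accessible(
    access_count: i32,
    max_access_count: Option<i32>,
    expiration: Option<NaiveDateTime>,
    deletion: NaiveDateTime,
    disabled: bool,
) -> bool {
    let now = Utc::now().naive_utc();
    let under_limit = max_access_count.map_or(true, |max| access_count < max);
    let not_expired = expiration.map_or(true, |exp| now < exp);
    under_limit && not_expired && now < deletion && !disabled
}

fn main() {
    let deletion = Utc::now().naive_utc() + Duration::days(7);
    println!("{}", is_accessible(0, Some(5), None, deletion, false)); // true
}
```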

#[post("/sends/<send_id>/access/file/<file_id>", data = "<data>")]
fn post_access_file(
    send_id: String,
    file_id: String,
    data: JsonUpcase<SendAccessData>,
    host: Host,
    conn: DbConn,
) -> JsonResult {
    let mut send = match Send::find_by_uuid(&send_id, &conn) {
        Some(s) => s,
        None => err_code!(SEND_INACCESSIBLE_MSG, 404),
    };

    if let Some(max_access_count) = send.max_access_count {
        if send.access_count >= max_access_count {
            err_code!(SEND_INACCESSIBLE_MSG, 404)
        }
    }

    if let Some(expiration) = send.expiration_date {
        if Utc::now().naive_utc() >= expiration {
            err_code!(SEND_INACCESSIBLE_MSG, 404)
        }
    }

    if Utc::now().naive_utc() >= send.deletion_date {
        err_code!(SEND_INACCESSIBLE_MSG, 404)
    }

    if send.disabled {
        err_code!(SEND_INACCESSIBLE_MSG, 404)
    }

    if send.password_hash.is_some() {
        match data.into_inner().data.Password {
            Some(ref p) if send.check_password(p) => { /* Nothing to do here */ }
            Some(_) => err!("Invalid password."),
            None => err_code!("Password not provided", 401),
        }
    }

    send.access_count += 1;

    send.save(&conn)?;

    Ok(Json(json!({
        "Object": "send-fileDownload",
        "Id": file_id,
        "Url": format!("{}/sends/{}/{}", &host.host, send_id, file_id)
    })))
}

#[put("/sends/<id>", data = "<data>")]
fn put_send(id: String, data: JsonUpcase<SendData>, headers: Headers, conn: DbConn, nt: Notify) -> JsonResult {
    enforce_disable_send_policy(&headers, &conn)?;

    let data: SendData = data.into_inner().data;

    let mut send = match Send::find_by_uuid(&id, &conn) {
        Some(s) => s,
        None => err!("Send not found"),
    };

    if send.user_uuid.as_ref() != Some(&headers.user.uuid) {
        err!("Send is not owned by user")
    }

    if send.atype != data.Type {
        err!("Sends can't change type")
    }

    // When updating a file Send, we receive nulls in the File field, as it's immutable,
    // so we only need to update the data field in the Text case
    if data.Type == SendType::Text as i32 {
        let data_str = if let Some(mut d) = data.Text {
            d.as_object_mut().and_then(|d| d.remove("Response"));
            serde_json::to_string(&d)?
        } else {
            err!("Send data not provided");
        };
        send.data = data_str;
    }

    if data.DeletionDate > Utc::now() + Duration::days(31) {
        err!(
            "You cannot have a Send with a deletion date that far into the future. Adjust the Deletion Date to a value less than 31 days from now and try again."
        );
    }
    send.name = data.Name;
    send.akey = data.Key;
    send.deletion_date = data.DeletionDate.naive_utc();
    send.notes = data.Notes;
    send.max_access_count = data.MaxAccessCount;
    send.expiration_date = data.ExpirationDate.map(|d| d.naive_utc());
    send.disabled = data.Disabled;

    // Only change the value if it's present
    if let Some(password) = data.Password {
        send.set_password(Some(&password));
    }

    send.save(&conn)?;
    nt.send_user_update(UpdateType::SyncSendUpdate, &headers.user);

    Ok(Json(send.to_json()))
}

#[delete("/sends/<id>")]
fn delete_send(id: String, headers: Headers, conn: DbConn, nt: Notify) -> EmptyResult {
    let send = match Send::find_by_uuid(&id, &conn) {
        Some(s) => s,
        None => err!("Send not found"),
    };

    if send.user_uuid.as_ref() != Some(&headers.user.uuid) {
        err!("Send is not owned by user")
    }

    send.delete(&conn)?;
    nt.send_user_update(UpdateType::SyncSendDelete, &headers.user);

    Ok(())
}

#[put("/sends/<id>/remove-password")]
fn put_remove_password(id: String, headers: Headers, conn: DbConn, nt: Notify) -> JsonResult {
    enforce_disable_send_policy(&headers, &conn)?;

    let mut send = match Send::find_by_uuid(&id, &conn) {
        Some(s) => s,
        None => err!("Send not found"),
    };

    if send.user_uuid.as_ref() != Some(&headers.user.uuid) {
        err!("Send is not owned by user")
    }

    send.set_password(None);
    send.save(&conn)?;
    nt.send_user_update(UpdateType::SyncSendUpdate, &headers.user);

    Ok(Json(send.to_json()))
}

src/api/core/two_factor/authenticator.rs
@@ -17,11 +17,7 @@ use crate::{
pub use crate::config::CONFIG;

pub fn routes() -> Vec<Route> {
    routes![
        generate_authenticator,
        activate_authenticator,
        activate_authenticator_put,
    ]
    routes![generate_authenticator, activate_authenticator, activate_authenticator_put,]
}

#[post("/two-factor/get-authenticator", data = "<data>")]
@@ -141,7 +137,7 @@ pub fn validate_totp_code(user_uuid: &str, totp_code: u64, secret: &str, ip: &Cl
    // The number of steps back and forward in time
    // Also check if we need to disable time-drifted TOTP codes.
    // If that is the case, we set the steps to 0 so only the current TOTP is valid.
    let steps: i64 = if CONFIG.authenticator_disable_time_drift() { 0 } else { 1 };
    let steps = !CONFIG.authenticator_disable_time_drift() as i64;

    for step in -steps..=steps {
        let time_step = current_timestamp / 30i64 + step;
@@ -163,22 +159,11 @@ pub fn validate_totp_code(user_uuid: &str, totp_code: u64, secret: &str, ip: &Cl
            twofactor.save(&conn)?;
            return Ok(());
        } else if generated == totp_code && time_step <= twofactor.last_used as i64 {
            warn!(
                "This or a TOTP code within {} steps back and forward has already been used!",
                steps
            );
            err!(format!(
                "Invalid TOTP code! Server time: {} IP: {}",
                current_time.format("%F %T UTC"),
                ip.ip
            ));
            warn!("This or a TOTP code within {} steps back and forward has already been used!", steps);
            err!(format!("Invalid TOTP code! Server time: {} IP: {}", current_time.format("%F %T UTC"), ip.ip));
        }
    }

    // Else no valid code was received, deny access
    err!(format!(
        "Invalid TOTP code! Server time: {} IP: {}",
        current_time.format("%F %T UTC"),
        ip.ip
    ));
    err!(format!("Invalid TOTP code! Server time: {} IP: {}", current_time.format("%F %T UTC"), ip.ip));
}
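The rewritten `steps` line relies on `bool as i64`: `!disable_drift` is `1` when drift is allowed and `0` when it is not, so the loop `-steps..=steps` checks either a three-step window or only the current step. A quick demonstration:

```rust
fn main() {
    for disable_drift in [false, true] {
        let steps = !disable_drift as i64;
        let window: Vec<i64> = (-steps..=steps).collect();
        println!("disable_drift={} -> window {:?}", disable_drift, window);
        // prints [-1, 0, 1] and then [0]
    }
}
```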

src/api/core/two_factor/duo.rs
@@ -12,6 +12,7 @@ use crate::{
        DbConn,
    },
    error::MapResult,
    util::get_reqwest_client,
    CONFIG,
};

@@ -59,7 +60,11 @@ impl DuoData {
        ik.replace_range(digits.., replaced);
        sk.replace_range(digits.., replaced);

        Self { host, ik, sk }
        Self {
            host,
            ik,
            sk,
        }
    }
}

@@ -185,9 +190,7 @@ fn activate_duo_put(data: JsonUpcase<EnableDuoData>, headers: Headers, conn: DbC
}

fn duo_api_request(method: &str, path: &str, params: &str, data: &DuoData) -> EmptyResult {
    const AGENT: &str = "bitwarden_rs:Duo/1.0 (Rust)";

    use reqwest::{blocking::Client, header::*, Method};
    use reqwest::{header, Method};
    use std::str::FromStr;

    // https://duo.com/docs/authapi#api-details
@@ -199,11 +202,13 @@ fn duo_api_request(method: &str, path: &str, params: &str, data: &DuoData) -> Em

    let m = Method::from_str(method).unwrap_or_default();

    Client::new()
    let client = get_reqwest_client();

    client
        .request(m, &url)
        .basic_auth(username, Some(password))
        .header(USER_AGENT, AGENT)
        .header(DATE, date)
        .header(header::USER_AGENT, "vaultwarden:Duo/1.0 (Rust)")
        .header(header::DATE, date)
        .send()?
        .error_for_status()?;

src/api/core/two_factor/email.rs
@@ -125,11 +125,7 @@ fn send_email(data: JsonUpcase<SendEmailData>, headers: Headers, conn: DbConn) -
    let twofactor_data = EmailTokenData::new(data.Email, generated_token);

    // Uses EmailVerificationChallenge as the type to show that it's not verified yet.
    let twofactor = TwoFactor::new(
        user.uuid,
        TwoFactorType::EmailVerificationChallenge,
        twofactor_data.to_json(),
    );
    let twofactor = TwoFactor::new(user.uuid, TwoFactorType::EmailVerificationChallenge, twofactor_data.to_json());
    twofactor.save(&conn)?;

    mail::send_token(&twofactor_data.email, &twofactor_data.last_token.map_res("Token is empty")?)?;
@@ -186,7 +182,8 @@ fn email(data: JsonUpcase<EmailData>, headers: Headers, conn: DbConn) -> JsonRes
/// Validate the email code when used as TwoFactor token mechanism
pub fn validate_email_code_str(user_uuid: &str, token: &str, data: &str, conn: &DbConn) -> EmptyResult {
    let mut email_data = EmailTokenData::from_json(&data)?;
    let mut twofactor = TwoFactor::find_by_user_and_type(&user_uuid, TwoFactorType::Email as i32, &conn).map_res("Two factor not found")?;
    let mut twofactor = TwoFactor::find_by_user_and_type(&user_uuid, TwoFactorType::Email as i32, &conn)
        .map_res("Two factor not found")?;
    let issued_token = match &email_data.last_token {
        Some(t) => t,
        _ => err!("No token available"),

src/api/core/two_factor/mod.rs
@@ -20,13 +20,7 @@ pub mod u2f;
pub mod yubikey;

pub fn routes() -> Vec<Route> {
    let mut routes = routes![
        get_twofactor,
        get_recover,
        recover,
        disable_twofactor,
        disable_twofactor_put,
    ];
    let mut routes = routes![get_twofactor, get_recover, recover, disable_twofactor, disable_twofactor_put,];

    routes.append(&mut authenticator::routes());
    routes.append(&mut duo::routes());
@@ -38,15 +32,15 @@ pub fn routes() -> Vec<Route> {
}

#[get("/two-factor")]
fn get_twofactor(headers: Headers, conn: DbConn) -> JsonResult {
fn get_twofactor(headers: Headers, conn: DbConn) -> Json<Value> {
    let twofactors = TwoFactor::find_by_user(&headers.user.uuid, &conn);
    let twofactors_json: Vec<Value> = twofactors.iter().map(TwoFactor::to_json_provider).collect();

    Ok(Json(json!({
    Json(json!({
        "Data": twofactors_json,
        "Object": "list",
        "ContinuationToken": null,
    })))
    }))
}

#[post("/two-factor/get-recover", data = "<data>")]

src/api/core/two_factor/u2f.rs
@@ -28,13 +28,7 @@ static APP_ID: Lazy<String> = Lazy::new(|| format!("{}/app-id.json", &CONFIG.dom
static U2F: Lazy<U2f> = Lazy::new(|| U2f::new(APP_ID.clone()));

pub fn routes() -> Vec<Route> {
    routes![
        generate_u2f,
        generate_u2f_challenge,
        activate_u2f,
        activate_u2f_put,
        delete_u2f,
    ]
    routes![generate_u2f, generate_u2f_challenge, activate_u2f, activate_u2f_put, delete_u2f,]
}

#[post("/two-factor/get-u2f", data = "<data>")]
@@ -131,12 +125,12 @@ struct RegisterResponseCopy {
    pub error_code: Option<NumberOrString>,
}

impl Into<RegisterResponse> for RegisterResponseCopy {
    fn into(self) -> RegisterResponse {
impl From<RegisterResponseCopy> for RegisterResponse {
    fn from(r: RegisterResponseCopy) -> RegisterResponse {
        RegisterResponse {
            registration_data: self.registration_data,
            version: self.version,
            client_data: self.client_data,
            registration_data: r.registration_data,
            version: r.version,
            client_data: r.client_data,
        }
    }
}

@@ -161,10 +155,7 @@ fn activate_u2f(data: JsonUpcase<EnableU2FData>, headers: Headers, conn: DbConn)

    let response: RegisterResponseCopy = serde_json::from_str(&data.DeviceResponse)?;

    let error_code = response
        .error_code
        .clone()
        .map_or("0".into(), NumberOrString::into_string);
    let error_code = response.error_code.clone().map_or("0".into(), NumberOrString::into_string);

    if error_code != "0" {
        err!("Error registering U2F token")
@@ -300,20 +291,13 @@ fn _old_parse_registrations(registations: &str) -> Vec<Registration> {

    let regs: Vec<Value> = serde_json::from_str(registations).expect("Can't parse Registration data");

    regs.into_iter()
        .map(|r| serde_json::from_value(r).unwrap())
        .map(|Helper(r)| r)
        .collect()
    regs.into_iter().map(|r| serde_json::from_value(r).unwrap()).map(|Helper(r)| r).collect()
}

pub fn generate_u2f_login(user_uuid: &str, conn: &DbConn) -> ApiResult<U2fSignRequest> {
    let challenge = _create_u2f_challenge(user_uuid, TwoFactorType::U2fLoginChallenge, conn);

    let registrations: Vec<_> = get_u2f_registrations(user_uuid, conn)?
        .1
        .into_iter()
        .map(|r| r.reg)
        .collect();
    let registrations: Vec<_> = get_u2f_registrations(user_uuid, conn)?.1.into_iter().map(|r| r.reg).collect();

    if registrations.is_empty() {
        err!("No U2F devices registered")

186
src/api/icons.rs
@@ -11,16 +11,17 @@ use once_cell::sync::Lazy;
use regex::Regex;
use reqwest::{blocking::Client, blocking::Response, header, Url};
use rocket::{http::ContentType, http::Cookie, response::Content, Route};
use soup::prelude::*;

use crate::{error::Error, util::Cached, CONFIG};
use crate::{
    error::Error,
    util::{get_reqwest_client_builder, Cached},
    CONFIG,
};

pub fn routes() -> Vec<Route> {
    routes![icon]
}

const ALLOWED_CHARS: &str = "_-.";

static CLIENT: Lazy<Client> = Lazy::new(|| {
    // Generate the default headers
    let mut default_headers = header::HeaderMap::new();
@@ -28,31 +29,47 @@ static CLIENT: Lazy<Client> = Lazy::new(|| {
    default_headers.insert(header::ACCEPT_LANGUAGE, header::HeaderValue::from_static("en-US,en;q=0.8"));
    default_headers.insert(header::CACHE_CONTROL, header::HeaderValue::from_static("no-cache"));
    default_headers.insert(header::PRAGMA, header::HeaderValue::from_static("no-cache"));
    default_headers.insert(header::ACCEPT, header::HeaderValue::from_static("text/html,application/xhtml+xml,application/xml; q=0.9,image/webp,image/apng,*/*;q=0.8"));
    default_headers.insert(
        header::ACCEPT,
        header::HeaderValue::from_static(
            "text/html,application/xhtml+xml,application/xml; q=0.9,image/webp,image/apng,*/*;q=0.8",
        ),
    );

    // Reuse the client between requests
    Client::builder()
    get_reqwest_client_builder()
        .timeout(Duration::from_secs(CONFIG.icon_download_timeout()))
        .default_headers(default_headers)
        .build()
        .unwrap()
        .expect("Failed to build icon client")
});

// Build Regex only once since this takes a lot of time.
static ICON_REL_REGEX: Lazy<Regex> = Lazy::new(|| Regex::new(r"(?i)icon$|apple.*icon").unwrap());
static ICON_REL_BLACKLIST: Lazy<Regex> = Lazy::new(|| Regex::new(r"(?i)mask-icon").unwrap());
static ICON_SIZE_REGEX: Lazy<Regex> = Lazy::new(|| Regex::new(r"(?x)(\d+)\D*(\d+)").unwrap());

// Special HashMap which holds the user defined Regex to speedup matching the regex.
static ICON_BLACKLIST_REGEX: Lazy<RwLock<HashMap<String, Regex>>> = Lazy::new(|| RwLock::new(HashMap::new()));

#[get("/<domain>/icon.png")]
fn icon(domain: String) -> Option<Cached<Content<Vec<u8>>>> {
fn icon(domain: String) -> Cached<Content<Vec<u8>>> {
    const FALLBACK_ICON: &[u8] = include_bytes!("../static/images/fallback-icon.png");

    if !is_valid_domain(&domain) {
        warn!("Invalid domain: {}", domain);
        return None;
        return Cached::ttl(
            Content(ContentType::new("image", "png"), FALLBACK_ICON.to_vec()),
            CONFIG.icon_cache_negttl(),
        );
    }

    get_icon(&domain).map(|icon| Cached::long(Content(ContentType::new("image", "x-icon"), icon)))
    match get_icon(&domain) {
        Some((icon, icon_type)) => {
            Cached::ttl(Content(ContentType::new("image", icon_type), icon), CONFIG.icon_cache_ttl())
        }
        _ => Cached::ttl(Content(ContentType::new("image", "png"), FALLBACK_ICON.to_vec()), CONFIG.icon_cache_negttl()),
    }
}
||||
|
||||
/// Returns if the domain provided is valid or not.
|
||||
@@ -60,6 +77,8 @@ fn icon(domain: String) -> Option<Cached<Content<Vec<u8>>>> {
|
||||
/// This does some manual checks and makes use of Url to do some basic checking.
|
||||
/// domains can't be larger then 63 characters (not counting multiple subdomains) according to the RFC's, but we limit the total size to 255.
|
||||
fn is_valid_domain(domain: &str) -> bool {
|
||||
const ALLOWED_CHARS: &str = "_-.";
|
||||
|
||||
// If parsing the domain fails using Url, it will not work with reqwest.
|
||||
if let Err(parse_error) = Url::parse(format!("https://{}", domain).as_str()) {
|
||||
debug!("Domain parse error: '{}' - {:?}", domain, parse_error);
|
||||
@@ -70,7 +89,10 @@ fn is_valid_domain(domain: &str) -> bool {
|
||||
|| domain.starts_with('-')
|
||||
|| domain.ends_with('-')
|
||||
{
|
||||
debug!("Domain validation error: '{}' is either empty, contains '..', starts with an '.', starts or ends with a '-'", domain);
|
||||
debug!(
|
||||
"Domain validation error: '{}' is either empty, contains '..', starts with an '.', starts or ends with a '-'",
|
||||
domain
|
||||
);
|
||||
return false;
|
||||
} else if domain.len() > 255 {
|
||||
debug!("Domain validation error: '{}' exceeds 255 characters", domain);
@@ -239,7 +261,7 @@ fn is_domain_blacklisted(domain: &str) -> bool {
    is_blacklisted
}

fn get_icon(domain: &str) -> Option<Vec<u8>> {
fn get_icon(domain: &str) -> Option<(Vec<u8>, String)> {
    let path = format!("{}/{}.png", CONFIG.icon_cache_folder(), domain);

    // Check for expiration of negatively cached copy
@@ -248,7 +270,11 @@ fn get_icon(domain: &str) -> Option<Vec<u8>> {
    }

    if let Some(icon) = get_cached_icon(&path) {
        return Some(icon);
        let icon_type = match get_icon_type(&icon) {
            Some(x) => x,
            _ => "x-icon",
        };
        return Some((icon, icon_type.to_string()));
    }

    if CONFIG.disable_icon_download() {
@@ -257,9 +283,9 @@ fn get_icon(domain: &str) -> Option<Vec<u8>> {

    // Get the icon, or None in case of error
    match download_icon(&domain) {
        Ok(icon) => {
        Ok((icon, icon_type)) => {
            save_icon(&path, &icon);
            Some(icon)
            Some((icon, icon_type.unwrap_or("x-icon").to_string()))
        }
        Err(e) => {
            error!("Error downloading icon: {:?}", e);
@@ -320,7 +346,6 @@ fn icon_is_expired(path: &str) -> bool {
    expired.unwrap_or(true)
}

#[derive(Debug)]
struct Icon {
    priority: u8,
    href: String,
@@ -328,7 +353,54 @@ struct Icon {

impl Icon {
    const fn new(priority: u8, href: String) -> Self {
        Self { href, priority }
        Self {
            href,
            priority,
        }
    }
}

fn get_favicons_node(node: &std::rc::Rc<markup5ever_rcdom::Node>, icons: &mut Vec<Icon>, url: &Url) {
    if let markup5ever_rcdom::NodeData::Element {
        name,
        attrs,
        ..
    } = &node.data
    {
        if name.local.as_ref() == "link" {
            let mut has_rel = false;
            let mut href = None;
            let mut sizes = None;

            let attrs = attrs.borrow();
            for attr in attrs.iter() {
                let attr_name = attr.name.local.as_ref();
                let attr_value = attr.value.as_ref();

                if attr_name == "rel" && ICON_REL_REGEX.is_match(attr_value) && !ICON_REL_BLACKLIST.is_match(attr_value)
                {
                    has_rel = true;
                } else if attr_name == "href" {
                    href = Some(attr_value);
                } else if attr_name == "sizes" {
                    sizes = Some(attr_value);
                }
            }

            if has_rel {
                if let Some(inner_href) = href {
                    if let Ok(full_href) = url.join(&inner_href).map(|h| h.into_string()) {
                        let priority = get_icon_priority(&full_href, sizes);
                        icons.push(Icon::new(priority, full_href));
                    }
                }
            }
        }
    }

    // TODO: Might want to limit the recursion depth?
    for child in node.children.borrow().iter() {
        get_favicons_node(child, icons, url);
    }
}
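
On the TODO above: one way to bound the recursion is to thread a depth counter through the walk. A sketch under that assumption — a generic tree type is used here, not the real `markup5ever_rcdom::Node`, and `MAX_DEPTH` is an assumed value, not something the source defines:

```rust
// Sketch only: a depth-limited walk over a generic tree.
const MAX_DEPTH: usize = 32; // assumed limit

struct Node {
    children: Vec<Node>,
}

fn walk(node: &Node, depth: usize) {
    if depth > MAX_DEPTH {
        return; // bail out on pathological or adversarial documents
    }
    for child in &node.children {
        walk(child, depth + 1);
    }
}

fn main() {
    let tree = Node { children: vec![Node { children: Vec::new() }] };
    walk(&tree, 0);
}
```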

@@ -431,30 +503,14 @@ fn get_icon_url(domain: &str) -> Result<IconUrlResult, Error> {

    // 512KB should be more than enough for the HTML, though as we only really need
    // the HTML header, it could potentially be reduced even further
    let limited_reader = content.take(512 * 1024);
    let mut limited_reader = content.take(512 * 1024);

    let soup = Soup::from_reader(limited_reader)?;
    // Search for and filter
    let favicons = soup
        .tag("link")
        .attr("rel", ICON_REL_REGEX.clone()) // Only use icon rels
        .attr_name("href") // Make sure there is a href
        .find_all();
    use html5ever::tendril::TendrilSink;
    let dom = html5ever::parse_document(markup5ever_rcdom::RcDom::default(), Default::default())
        .from_utf8()
        .read_from(&mut limited_reader)?;

    // Loop through all the found icons and determine its priority
    for favicon in favicons {
        let sizes = favicon.get("sizes");
        let href = favicon.get("href").unwrap();
        // Skip invalid URLs
        let full_href = match url.join(&href) {
            Ok(h) => h.into_string(),
            _ => continue,
        };

        let priority = get_icon_priority(&full_href, sizes);

        iconlist.push(Icon::new(priority, full_href))
    }
    get_favicons_node(&dom.document, &mut iconlist, &url);
} else {
    // Add the default favicon.ico to the list with just the given domain
    iconlist.push(Icon::new(35, format!("{}/favicon.ico", ssldomain)));
@@ -465,10 +521,10 @@ fn get_icon_url(domain: &str) -> Result<IconUrlResult, Error> {
    iconlist.sort_by_key(|x| x.priority);

    // There is always an icon in the list, so there is no need to check whether it exists; just return the first one
    Ok(IconUrlResult{
    Ok(IconUrlResult {
        iconlist,
        cookies: cookie_str,
        referer
        referer,
    })
}

@@ -489,9 +545,7 @@ fn get_page_with_cookies(url: &str, cookie_str: &str, referer: &str) -> Result<R
    client = client.header("Referer", referer)
}

client.send()?
    .error_for_status()
    .map_err(Into::into)
client.send()?.error_for_status().map_err(Into::into)
}

/// Returns an integer with the priority of the icon type to prefer.
@@ -506,7 +560,7 @@ fn get_page_with_cookies(url: &str, cookie_str: &str, referer: &str) -> Result<R
/// priority1 = get_icon_priority("http://example.com/path/to/a/favicon.png", "32x32");
/// priority2 = get_icon_priority("https://example.com/path/to/a/favicon.ico", "");
/// ```
fn get_icon_priority(href: &str, sizes: Option<String>) -> u8 {
fn get_icon_priority(href: &str, sizes: Option<&str>) -> u8 {
    // Check if there is a dimension set
    let (width, height) = parse_sizes(sizes);

@@ -554,7 +608,7 @@ fn get_icon_priority(href: &str, sizes: Option<String>) -> u8 {
/// let (width, height) = parse_sizes("x128x128"); // (128, 128)
/// let (width, height) = parse_sizes("32"); // (0, 0)
/// ```
fn parse_sizes(sizes: Option<String>) -> (u16, u16) {
fn parse_sizes(sizes: Option<&str>) -> (u16, u16) {
    let mut width: u16 = 0;
    let mut height: u16 = 0;

@@ -573,7 +627,7 @@ fn parse_sizes(sizes: Option<String>) -> (u16, u16) {
    (width, height)
}
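
The doc examples above pin down the expected behavior well enough to test against. Here is a simplified re-implementation matching those examples — the real function may use a regex internally, so treat this split-based version as an assumption:

```rust
// A minimal stand-alone version of the behavior documented above:
// accept "WxH" (ignoring leading junk), return (0, 0) when no
// well-formed pair is present.
fn parse_sizes(sizes: Option<&str>) -> (u16, u16) {
    if let Some(sizes) = sizes {
        // Split on every non-digit and keep the last two numeric
        // fields, so inputs like "x128x128" still yield (128, 128).
        let nums: Vec<u16> = sizes
            .split(|c: char| !c.is_ascii_digit())
            .filter_map(|s| s.parse().ok())
            .collect();
        if nums.len() >= 2 {
            return (nums[nums.len() - 2], nums[nums.len() - 1]);
        }
    }
    (0, 0)
}

fn main() {
    assert_eq!(parse_sizes(Some("32x32")), (32, 32));
    assert_eq!(parse_sizes(Some("x128x128")), (128, 128));
    assert_eq!(parse_sizes(Some("32")), (0, 0));
    assert_eq!(parse_sizes(None), (0, 0));
}
```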

fn download_icon(domain: &str) -> Result<Vec<u8>, Error> {
fn download_icon(domain: &str) -> Result<(Vec<u8>, Option<&str>), Error> {
    if is_domain_blacklisted(domain) {
        err!("Domain is blacklisted", domain)
    }
@@ -581,6 +635,7 @@ fn download_icon(domain: &str) -> Result<Vec<u8>, Error> {
    let icon_result = get_icon_url(&domain)?;

    let mut buffer = Vec::new();
    let mut icon_type: Option<&str> = None;

    use data_url::DataUrl;

@@ -592,29 +647,43 @@ fn download_icon(domain: &str) -> Result<Vec<u8>, Error> {
    Ok((body, _fragment)) => {
        // Also check if the size is at least 67 bytes, which seems to be the smallest PNG I could create
        if body.len() >= 67 {
            // Check if the icon type is allowed, else try an icon from the list.
            icon_type = get_icon_type(&body);
            if icon_type.is_none() {
                debug!("Icon from {} data:image uri is not a valid image type", domain);
                continue;
            }
            info!("Extracted icon from data:image uri for {}", domain);
            buffer = body;
            break;
        }
    }
    _ => warn!("data uri is invalid"),
    _ => warn!("Extracted icon from data:image uri is invalid"),
};
} else {
    match get_page_with_cookies(&icon.href, &icon_result.cookies, &icon_result.referer) {
        Ok(mut res) => {
            info!("Downloaded icon from {}", icon.href);
            res.copy_to(&mut buffer)?;
            // Check if the icon type is allowed, else try an icon from the list.
            icon_type = get_icon_type(&buffer);
            if icon_type.is_none() {
                buffer.clear();
                debug!("Icon from {} is not a valid image type", icon.href);
                continue;
            }
            info!("Downloaded icon from {}", icon.href);
            break;
        },
        }
        _ => warn!("Download failed for {}", icon.href),
    };
}
}

if buffer.is_empty() {
    err!("Empty response")
    err!("Empty response downloading icon")
}

Ok(buffer)
Ok((buffer, icon_type))
}

fn save_icon(path: &str, icon: &[u8]) {
@@ -626,7 +695,18 @@ fn save_icon(path: &str, icon: &[u8]) {
    create_dir_all(&CONFIG.icon_cache_folder()).expect("Error creating icon cache");
}
Err(e) => {
    info!("Icon save error: {:?}", e);
    warn!("Icon save error: {:?}", e);
}
}
}

fn get_icon_type(bytes: &[u8]) -> Option<&'static str> {
    match bytes {
        [137, 80, 78, 71, ..] => Some("png"),
        [0, 0, 1, 0, ..] => Some("x-icon"),
        [82, 73, 70, 70, ..] => Some("webp"),
        [255, 216, 255, ..] => Some("jpeg"),
        [66, 77, ..] => Some("bmp"),
        _ => None,
    }
}
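
The sniffing is driven purely by magic numbers, so it can be exercised directly; note that `[82, 73, 70, 70]` is ASCII `RIFF`, the container format WebP shares with WAV/AVI. A quick self-contained check:

```rust
// Mirror of the function above, repeated so the example is self-contained.
fn get_icon_type(bytes: &[u8]) -> Option<&'static str> {
    match bytes {
        [137, 80, 78, 71, ..] => Some("png"), // \x89 P N G
        [0, 0, 1, 0, ..] => Some("x-icon"),   // ICO reserved/type fields
        [82, 73, 70, 70, ..] => Some("webp"), // ASCII "RIFF" container
        [255, 216, 255, ..] => Some("jpeg"),  // JPEG SOI + marker
        [66, 77, ..] => Some("bmp"),          // ASCII "BM"
        _ => None,
    }
}

fn main() {
    assert_eq!(get_icon_type(&[137, 80, 78, 71, 13, 10, 26, 10]), Some("png"));
    assert_eq!(get_icon_type(b"RIFF\x24\x00\x00\x00WEBP"), Some("webp"));
    assert_eq!(get_icon_type(b"GIF89a"), None); // GIF is not in the allow-list
}
```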

@@ -72,7 +72,8 @@ fn _refresh_login(data: ConnectData, conn: DbConn) -> JsonResult {
    "Kdf": user.client_kdf_type,
    "KdfIterations": user.client_kdf_iter,
    "ResetMasterPassword": false, // TODO: according to the official server this seems to be something like user.password_hash.is_empty(), but it would need testing
    "scope": "api offline_access"
    "scope": "api offline_access",
    "unofficialServer": true,
})))
}

@@ -87,34 +88,28 @@ fn _password_login(data: ConnectData, conn: DbConn, ip: &ClientIp) -> JsonResult
    let username = data.username.as_ref().unwrap();
    let user = match User::find_by_mail(username, &conn) {
        Some(user) => user,
        None => err!(
            "Username or password is incorrect. Try again",
            format!("IP: {}. Username: {}.", ip.ip, username)
        ),
        None => err!("Username or password is incorrect. Try again", format!("IP: {}. Username: {}.", ip.ip, username)),
    };

    // Check password
    let password = data.password.as_ref().unwrap();
    if !user.check_valid_password(password) {
        err!(
            "Username or password is incorrect. Try again",
            format!("IP: {}. Username: {}.", ip.ip, username)
        )
        err!("Username or password is incorrect. Try again", format!("IP: {}. Username: {}.", ip.ip, username))
    }

    // Check if the user is disabled
    if !user.enabled {
        err!(
            "This user has been disabled",
            format!("IP: {}. Username: {}.", ip.ip, username)
        )
        err!("This user has been disabled", format!("IP: {}. Username: {}.", ip.ip, username))
    }

    let now = Local::now();

    if user.verified_at.is_none() && CONFIG.mail_enabled() && CONFIG.signups_verify() {
        let now = now.naive_utc();
        if user.last_verifying_at.is_none() || now.signed_duration_since(user.last_verifying_at.unwrap()).num_seconds() > CONFIG.signups_verify_resend_time() as i64 {
        if user.last_verifying_at.is_none()
            || now.signed_duration_since(user.last_verifying_at.unwrap()).num_seconds()
                > CONFIG.signups_verify_resend_time() as i64
        {
            let resend_limit = CONFIG.signups_verify_resend_limit() as i32;
            if resend_limit == 0 || user.login_verify_count < resend_limit {
                // We want to send another email verification if we require signups to verify
@@ -134,10 +129,7 @@ fn _password_login(data: ConnectData, conn: DbConn, ip: &ClientIp) -> JsonResult
            }

            // We still want the login to fail until they have actually verified the email address
            err!(
                "Please verify your email before trying again.",
                format!("IP: {}. Username: {}.", ip.ip, username)
            )
            err!("Please verify your email before trying again.", format!("IP: {}. Username: {}.", ip.ip, username))
        }

    let (mut device, new_device) = get_device(&data, &conn, &user);
@@ -168,11 +160,12 @@ fn _password_login(data: ConnectData, conn: DbConn, ip: &ClientIp) -> JsonResult
    "Key": user.akey,
    "PrivateKey": user.private_key,
    //"TwoFactorToken": "11122233333444555666777888999"

    "Kdf": user.client_kdf_type,
    "KdfIterations": user.client_kdf_iter,
    "ResetMasterPassword": false, // TODO: Same as above
    "scope": "api offline_access"
    "scope": "api offline_access",
    "unofficialServer": true,
});

if let Some(token) = twofactor_token {
@@ -234,9 +227,7 @@ fn twofactor_auth(
    None => err_json!(_json_err_twofactor(&twofactor_ids, user_uuid, conn)?, "2FA token not provided"),
};

let selected_twofactor = twofactors
    .into_iter()
    .find(|tf| tf.atype == selected_id && tf.enabled);
let selected_twofactor = twofactors.into_iter().find(|tf| tf.atype == selected_id && tf.enabled);

use crate::api::core::two_factor as _tf;
use crate::crypto::ct_eq;
@@ -245,18 +236,26 @@ fn twofactor_auth(
let mut remember = data.two_factor_remember.unwrap_or(0);

match TwoFactorType::from_i32(selected_id) {
    Some(TwoFactorType::Authenticator) => _tf::authenticator::validate_totp_code_str(user_uuid, twofactor_code, &selected_data?, ip, conn)?,
    Some(TwoFactorType::Authenticator) => {
        _tf::authenticator::validate_totp_code_str(user_uuid, twofactor_code, &selected_data?, ip, conn)?
    }
    Some(TwoFactorType::U2f) => _tf::u2f::validate_u2f_login(user_uuid, twofactor_code, conn)?,
    Some(TwoFactorType::YubiKey) => _tf::yubikey::validate_yubikey_login(twofactor_code, &selected_data?)?,
    Some(TwoFactorType::Duo) => _tf::duo::validate_duo_login(data.username.as_ref().unwrap(), twofactor_code, conn)?,
    Some(TwoFactorType::Email) => _tf::email::validate_email_code_str(user_uuid, twofactor_code, &selected_data?, conn)?,
    Some(TwoFactorType::Duo) => {
        _tf::duo::validate_duo_login(data.username.as_ref().unwrap(), twofactor_code, conn)?
    }
    Some(TwoFactorType::Email) => {
        _tf::email::validate_email_code_str(user_uuid, twofactor_code, &selected_data?, conn)?
    }

    Some(TwoFactorType::Remember) => {
        match device.twofactor_remember {
            Some(ref code) if !CONFIG.disable_2fa_remember() && ct_eq(code, twofactor_code) => {
                remember = 1; // Make sure we also return the token here, otherwise it will only remember the first time
            }
            _ => err_json!(_json_err_twofactor(&twofactor_ids, user_uuid, conn)?, "2FA Remember token not provided"),
            _ => {
                err_json!(_json_err_twofactor(&twofactor_ids, user_uuid, conn)?, "2FA Remember token not provided")
            }
        }
    }
_ => err!("Invalid two factor provider"),
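
The `Remember` arm above compares tokens with `crate::crypto::ct_eq` rather than `==`, so comparison time does not leak how many leading bytes matched. A constant-time equality can be built on `ring`, which the project already depends on — a sketch; the real helper may be implemented differently:

```rust
// Sketch of a constant-time string comparison built on ring.
use ring::constant_time::verify_slices_are_equal;

fn ct_eq(a: &str, b: &str) -> bool {
    // Returns Err on mismatch (including length mismatch) without
    // short-circuiting on the first differing byte.
    verify_slices_are_equal(a.as_bytes(), b.as_bytes()).is_ok()
}

fn main() {
    assert!(ct_eq("token", "token"));
    assert!(!ct_eq("token", "tokem"));
}
```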

@@ -10,6 +10,8 @@ use serde_json::Value;

pub use crate::api::{
    admin::routes as admin_routes,
    core::purge_sends,
    core::purge_trashed_ciphers,
    core::routes as core_routes,
    icons::routes as icons_routes,
    identity::routes as identity_routes,
@@ -53,9 +55,9 @@ impl NumberOrString {
    use std::num::ParseIntError as PIE;
    match self {
        NumberOrString::Number(n) => Ok(n),
        NumberOrString::String(s) => s
            .parse()
            .map_err(|e: PIE| crate::Error::new("Can't convert to number", e.to_string())),
        NumberOrString::String(s) => {
            s.parse().map_err(|e: PIE| crate::Error::new("Can't convert to number", e.to_string()))
        }
    }
}
}
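
`NumberOrString` exists because clients send some fields either as a JSON number or as a string. A self-contained sketch of the pattern, assuming a serde untagged enum (which matches the behavior shown above):

```rust
// serde tries each variant in order, so both 42 and "42" deserialize.
use serde::Deserialize;

#[derive(Deserialize, Debug)]
#[serde(untagged)]
enum NumberOrString {
    Number(i32),
    String(String),
}

impl NumberOrString {
    fn into_i32(self) -> Result<i32, std::num::ParseIntError> {
        match self {
            NumberOrString::Number(n) => Ok(n),
            NumberOrString::String(s) => s.parse(),
        }
    }
}

fn main() {
    let n: NumberOrString = serde_json::from_str("42").unwrap();
    let s: NumberOrString = serde_json::from_str("\"42\"").unwrap();
    assert_eq!(n.into_i32().unwrap(), 42);
    assert_eq!(s.into_i32().unwrap(), 42);
}
```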

@@ -4,12 +4,7 @@ use rocket::Route;
use rocket_contrib::json::Json;
use serde_json::Value as JsonValue;

use crate::{
    api::{EmptyResult, JsonResult},
    auth::Headers,
    db::DbConn,
    Error, CONFIG,
};
use crate::{api::EmptyResult, auth::Headers, db::DbConn, Error, CONFIG};

pub fn routes() -> Vec<Route> {
    routes![negotiate, websockets_err]
@@ -19,12 +14,15 @@ static SHOW_WEBSOCKETS_MSG: AtomicBool = AtomicBool::new(true);

#[get("/hub")]
fn websockets_err() -> EmptyResult {
    if CONFIG.websocket_enabled() && SHOW_WEBSOCKETS_MSG.compare_and_swap(true, false, Ordering::Relaxed) {
    if CONFIG.websocket_enabled()
        && SHOW_WEBSOCKETS_MSG.compare_exchange(true, false, Ordering::Relaxed, Ordering::Relaxed).is_ok()
    {
        err!(
            "###########################################################
            "
###########################################################
'/notifications/hub' should be proxied to the websocket server or notifications won't work.
Go to the Wiki for more info, or disable WebSockets setting WEBSOCKET_ENABLED=false.
###########################################################################################"
###########################################################################################\n"
        )
    } else {
        Err(Error::empty())
@@ -32,7 +30,7 @@ fn websockets_err() -> EmptyResult {
}
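
`compare_and_swap` was deprecated in Rust 1.50 in favor of `compare_exchange`, which takes separate success/failure orderings and reports the outcome via `Result` — hence the added `.is_ok()`. The show-once pattern in isolation:

```rust
use std::sync::atomic::{AtomicBool, Ordering};

static SHOW_MSG: AtomicBool = AtomicBool::new(true);

fn should_show() -> bool {
    // Succeeds (returns Ok) only for the first caller that sees `true`.
    SHOW_MSG.compare_exchange(true, false, Ordering::Relaxed, Ordering::Relaxed).is_ok()
}

fn main() {
    assert!(should_show());  // first call flips true -> false
    assert!(!should_show()); // subsequent calls fail the exchange
}
```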

#[post("/hub/negotiate")]
fn negotiate(_headers: Headers, _conn: DbConn) -> JsonResult {
fn negotiate(_headers: Headers, _conn: DbConn) -> Json<JsonValue> {
    use crate::crypto;
    use data_encoding::BASE64URL;

@@ -48,10 +46,10 @@ fn negotiate(_headers: Headers, _conn: DbConn) -> JsonResult {
    // Rocket SSE support: https://github.com/SergioBenitez/Rocket/issues/33
    // {"transport":"ServerSentEvents", "transferFormats":["Text"]},
    // {"transport":"LongPolling", "transferFormats":["Text","Binary"]}
    Ok(Json(json!({
    Json(json!({
        "connectionId": conn_id,
        "availableTransports": available_transports
    })))
    }))
}

//
@@ -121,7 +119,7 @@ fn convert_option<T: Into<Value>>(option: Option<T>) -> Value {
}

// Server WebSocket handler
pub struct WSHandler {
pub struct WsHandler {
    out: Sender,
    user_uuid: Option<String>,
    users: WebSocketUsers,
@@ -141,7 +139,7 @@ const PING: Token = Token(1);

const ACCESS_TOKEN_KEY: &str = "access_token=";

impl WSHandler {
impl WsHandler {
    fn err(&self, msg: &'static str) -> ws::Result<()> {
        self.out.close(ws::CloseCode::Invalid)?;

@@ -161,14 +159,14 @@ impl WSHandler {
        }
    }
};

// Otherwise verify the query parameter value
let path = hs.request.resource();
if let Some(params) = path.split('?').nth(1) {
    let params_iter = params.split('&').take(1);
    for val in params_iter {
        if val.starts_with(ACCESS_TOKEN_KEY) {
            return Some(val[ACCESS_TOKEN_KEY.len()..].into());
        if let Some(stripped) = val.strip_prefix(ACCESS_TOKEN_KEY) {
            return Some(stripped.into());
        }
    }
};
@@ -177,7 +175,7 @@ impl WSHandler {
}
}
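
The `strip_prefix` change above is a small but typical cleanup: available since Rust 1.45, it replaces the `starts_with` check plus manual index arithmetic in one step and cannot panic on a bad offset. Reduced to essentials:

```rust
const ACCESS_TOKEN_KEY: &str = "access_token=";

fn token_from_param(val: &str) -> Option<String> {
    // Before: if val.starts_with(KEY) { Some(val[KEY.len()..].into()) }
    val.strip_prefix(ACCESS_TOKEN_KEY).map(|s| s.to_string())
}

fn main() {
    assert_eq!(token_from_param("access_token=abc"), Some("abc".into()));
    assert_eq!(token_from_param("id=42"), None);
}
```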

impl Handler for WSHandler {
impl Handler for WsHandler {
    fn on_open(&mut self, hs: Handshake) -> ws::Result<()> {
        // Path == "/notifications/hub?id=<id>==&access_token=<access_token>"
        //
@@ -205,9 +203,7 @@ impl Handler for WSHandler {
        let handler_insert = self.out.clone();
        let handler_update = self.out.clone();

        self.users
            .map
            .upsert(user_uuid, || vec![handler_insert], |ref mut v| v.push(handler_update));
        self.users.map.upsert(user_uuid, || vec![handler_insert], |ref mut v| v.push(handler_update));

        // Schedule a ping to keep the connection alive
        self.out.timeout(PING_MS, PING)
@@ -217,7 +213,11 @@ impl Handler for WSHandler {
        if let Message::Text(text) = msg.clone() {
            let json = &text[..text.len() - 1]; // Remove last char
            if let Ok(InitialMessage { protocol, version }) = from_str::<InitialMessage>(json) {
            if let Ok(InitialMessage {
                protocol,
                version,
            }) = from_str::<InitialMessage>(json)
            {
                if &protocol == "messagepack" && version == 1 {
                    return self.out.send(&INITIAL_RESPONSE[..]); // Respond to initial message
                }
@@ -241,13 +241,13 @@ impl Handler for WSHandler {
    }
}

struct WSFactory {
struct WsFactory {
    pub users: WebSocketUsers,
}

impl WSFactory {
impl WsFactory {
    pub fn init() -> Self {
        WSFactory {
        WsFactory {
            users: WebSocketUsers {
                map: Arc::new(CHashMap::new()),
            },
@@ -255,11 +255,11 @@ impl WSFactory {
    }
}

impl Factory for WSFactory {
    type Handler = WSHandler;
impl Factory for WsFactory {
    type Handler = WsHandler;

    fn connection_made(&mut self, out: Sender) -> Self::Handler {
        WSHandler {
        WsHandler {
            out,
            user_uuid: None,
            users: self.users.clone(),
@@ -296,10 +296,7 @@ impl WebSocketUsers {
    // NOTE: The last modified date needs to be updated before calling these methods
    pub fn send_user_update(&self, ut: UpdateType, user: &User) {
        let data = create_update(
            vec![
                ("UserId".into(), user.uuid.clone().into()),
                ("Date".into(), serialize_date(user.updated_at)),
            ],
            vec![("UserId".into(), user.uuid.clone().into()), ("Date".into(), serialize_date(user.updated_at))],
            ut,
        );

@@ -395,6 +392,10 @@ pub enum UpdateType {

    LogOut = 11,

    SyncSendCreate = 12,
    SyncSendUpdate = 13,
    SyncSendDelete = 14,

    None = 100,
}

@@ -402,15 +403,17 @@ use rocket::State;
pub type Notify<'a> = State<'a, WebSocketUsers>;

pub fn start_notification_server() -> WebSocketUsers {
    let factory = WSFactory::init();
    let factory = WsFactory::init();
    let users = factory.users.clone();

    if CONFIG.websocket_enabled() {
        thread::spawn(move || {
            let mut settings = ws::Settings::default();
            settings.max_connections = 500;
            settings.queue_size = 2;
            settings.panic_on_internal = false;
            let settings = ws::Settings {
                max_connections: 500,
                queue_size: 2,
                panic_on_internal: false,
                ..Default::default()
            };

            ws::Builder::new()
.with_settings(settings)
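
The settings change swaps mutate-after-default for functional struct-update syntax, which keeps the binding immutable and makes the overridden fields explicit. With a stand-in type (not the real `ws::Settings`):

```rust
// Stand-in struct for illustration; any Default-implementing struct works.
#[derive(Default)]
struct Settings {
    max_connections: usize,
    queue_size: usize,
    panic_on_internal: bool,
    tcp_nodelay: bool,
}

fn main() {
    let settings = Settings {
        max_connections: 500,
        queue_size: 2,
        panic_on_internal: false,
        ..Default::default() // everything else keeps its default
    };
    assert_eq!(settings.max_connections, 500);
    assert!(!settings.tcp_nodelay);
}
```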

@@ -10,7 +10,7 @@ pub fn routes() -> Vec<Route> {
    // If adding more routes here, consider also adding them to
    // crate::utils::LOGGED_ROUTES to make sure they appear in the log
    if CONFIG.web_vault_enabled() {
        routes![web_index, app_id, web_files, attachments, alive, static_files]
        routes![web_index, app_id, web_files, attachments, sends, alive, static_files]
    } else {
        routes![attachments, alive, static_files]
    }
@@ -60,6 +60,11 @@ fn attachments(uuid: String, file: PathBuf) -> Option<NamedFile> {
    NamedFile::open(Path::new(&CONFIG.attachments_folder()).join(uuid).join(file)).ok()
}

#[get("/sends/<send_id>/<file_id>")]
fn sends(send_id: String, file_id: String) -> Option<NamedFile> {
    NamedFile::open(Path::new(&CONFIG.sends_folder()).join(send_id).join(file_id)).ok()
}

#[get("/alive")]
fn alive() -> Json<String> {
    use crate::util::format_date;
@@ -78,12 +83,15 @@ fn static_files(filename: String) -> Result<Content<&'static [u8]>, Error> {
    "hibp.png" => Ok(Content(ContentType::PNG, include_bytes!("../static/images/hibp.png"))),

    "bootstrap.css" => Ok(Content(ContentType::CSS, include_bytes!("../static/scripts/bootstrap.css"))),
    "bootstrap-native.js" => Ok(Content(ContentType::JavaScript, include_bytes!("../static/scripts/bootstrap-native.js"))),
    "md5.js" => Ok(Content(ContentType::JavaScript, include_bytes!("../static/scripts/md5.js"))),
    "bootstrap-native.js" => {
        Ok(Content(ContentType::JavaScript, include_bytes!("../static/scripts/bootstrap-native.js")))
    }
    "identicon.js" => Ok(Content(ContentType::JavaScript, include_bytes!("../static/scripts/identicon.js"))),
    "datatables.js" => Ok(Content(ContentType::JavaScript, include_bytes!("../static/scripts/datatables.js"))),
    "datatables.css" => Ok(Content(ContentType::CSS, include_bytes!("../static/scripts/datatables.css"))),
    "jquery-3.5.1.slim.js" => Ok(Content(ContentType::JavaScript, include_bytes!("../static/scripts/jquery-3.5.1.slim.js"))),
    "jquery-3.5.1.slim.js" => {
        Ok(Content(ContentType::JavaScript, include_bytes!("../static/scripts/jquery-3.5.1.slim.js")))
    }
    _ => err!(format!("Static file not found: {}", filename)),
    }
}
src/auth.rs (136 changes)
@@ -58,28 +58,28 @@ fn decode_jwt<T: DeserializeOwned>(token: &str, issuer: String) -> Result<T, Err
    .map_res("Error decoding JWT")
}

pub fn decode_login(token: &str) -> Result<LoginJWTClaims, Error> {
pub fn decode_login(token: &str) -> Result<LoginJwtClaims, Error> {
    decode_jwt(token, JWT_LOGIN_ISSUER.to_string())
}

pub fn decode_invite(token: &str) -> Result<InviteJWTClaims, Error> {
pub fn decode_invite(token: &str) -> Result<InviteJwtClaims, Error> {
    decode_jwt(token, JWT_INVITE_ISSUER.to_string())
}

pub fn decode_delete(token: &str) -> Result<DeleteJWTClaims, Error> {
pub fn decode_delete(token: &str) -> Result<DeleteJwtClaims, Error> {
    decode_jwt(token, JWT_DELETE_ISSUER.to_string())
}

pub fn decode_verify_email(token: &str) -> Result<VerifyEmailJWTClaims, Error> {
pub fn decode_verify_email(token: &str) -> Result<VerifyEmailJwtClaims, Error> {
    decode_jwt(token, JWT_VERIFYEMAIL_ISSUER.to_string())
}

pub fn decode_admin(token: &str) -> Result<AdminJWTClaims, Error> {
pub fn decode_admin(token: &str) -> Result<AdminJwtClaims, Error> {
    decode_jwt(token, JWT_ADMIN_ISSUER.to_string())
}

#[derive(Debug, Serialize, Deserialize)]
pub struct LoginJWTClaims {
pub struct LoginJwtClaims {
    // Not before
    pub nbf: i64,
    // Expiration time
@@ -110,7 +110,7 @@ pub struct LoginJWTClaims {
}

#[derive(Debug, Serialize, Deserialize)]
pub struct InviteJWTClaims {
pub struct InviteJwtClaims {
    // Not before
    pub nbf: i64,
    // Expiration time
@@ -132,9 +132,9 @@ pub fn generate_invite_claims(
    org_id: Option<String>,
    user_org_id: Option<String>,
    invited_by_email: Option<String>,
) -> InviteJWTClaims {
) -> InviteJwtClaims {
    let time_now = Utc::now().naive_utc();
    InviteJWTClaims {
    InviteJwtClaims {
        nbf: time_now.timestamp(),
        exp: (time_now + Duration::days(5)).timestamp(),
        iss: JWT_INVITE_ISSUER.to_string(),
@@ -147,7 +147,7 @@ pub fn generate_invite_claims(
}

#[derive(Debug, Serialize, Deserialize)]
pub struct DeleteJWTClaims {
pub struct DeleteJwtClaims {
    // Not before
    pub nbf: i64,
    // Expiration time
@@ -158,9 +158,9 @@ pub struct DeleteJWTClaims {
    pub sub: String,
}

pub fn generate_delete_claims(uuid: String) -> DeleteJWTClaims {
pub fn generate_delete_claims(uuid: String) -> DeleteJwtClaims {
    let time_now = Utc::now().naive_utc();
    DeleteJWTClaims {
    DeleteJwtClaims {
        nbf: time_now.timestamp(),
        exp: (time_now + Duration::days(5)).timestamp(),
        iss: JWT_DELETE_ISSUER.to_string(),
@@ -169,7 +169,7 @@ pub fn generate_delete_claims(uuid: String) -> DeleteJWTClaims {
}

#[derive(Debug, Serialize, Deserialize)]
pub struct VerifyEmailJWTClaims {
pub struct VerifyEmailJwtClaims {
    // Not before
    pub nbf: i64,
    // Expiration time
@@ -180,9 +180,9 @@ pub struct VerifyEmailJWTClaims {
    pub sub: String,
}

pub fn generate_verify_email_claims(uuid: String) -> DeleteJWTClaims {
pub fn generate_verify_email_claims(uuid: String) -> DeleteJwtClaims {
    let time_now = Utc::now().naive_utc();
    DeleteJWTClaims {
    DeleteJwtClaims {
        nbf: time_now.timestamp(),
        exp: (time_now + Duration::days(5)).timestamp(),
        iss: JWT_VERIFYEMAIL_ISSUER.to_string(),
@@ -191,7 +191,7 @@ pub fn generate_verify_email_claims(uuid: String) -> DeleteJWTClaims {
}

#[derive(Debug, Serialize, Deserialize)]
pub struct AdminJWTClaims {
pub struct AdminJwtClaims {
    // Not before
    pub nbf: i64,
    // Expiration time
@@ -202,9 +202,9 @@ pub struct AdminJWTClaims {
    pub sub: String,
}

pub fn generate_admin_claims() -> AdminJWTClaims {
pub fn generate_admin_claims() -> AdminJwtClaims {
    let time_now = Utc::now().naive_utc();
    AdminJWTClaims {
    AdminJwtClaims {
        nbf: time_now.timestamp(),
        exp: (time_now + Duration::minutes(20)).timestamp(),
        iss: JWT_ADMIN_ISSUER.to_string(),
@@ -222,13 +222,11 @@ use crate::db::{
    DbConn,
};

pub struct Headers {
pub struct Host {
    pub host: String,
    pub device: Device,
    pub user: User,
}

impl<'a, 'r> FromRequest<'a, 'r> for Headers {
impl<'a, 'r> FromRequest<'a, 'r> for Host {
    type Error = &'static str;

    fn from_request(request: &'a Request<'r>) -> Outcome<Self, Self::Error> {
@@ -262,6 +260,30 @@ impl<'a, 'r> FromRequest<'a, 'r> for Headers {
        format!("{}://{}", protocol, host)
    };

    Outcome::Success(Host {
        host,
    })
}
}

pub struct Headers {
    pub host: String,
    pub device: Device,
    pub user: User,
}

impl<'a, 'r> FromRequest<'a, 'r> for Headers {
    type Error = &'static str;

    fn from_request(request: &'a Request<'r>) -> Outcome<Self, Self::Error> {
        let headers = request.headers();

        let host = match Host::from_request(request) {
            Outcome::Forward(_) => return Outcome::Forward(()),
            Outcome::Failure(f) => return Outcome::Failure(f),
            Outcome::Success(host) => host.host,
        };

        // Get access_token
        let access_token: &str = match headers.get_one("Authorization") {
            Some(a) => match a.rsplit("Bearer ").next() {
@@ -296,10 +318,8 @@ impl<'a, 'r> FromRequest<'a, 'r> for Headers {
        };

        if user.security_stamp != claims.sstamp {
            if let Some(stamp_exception) = user
                .stamp_exception
                .as_deref()
                .and_then(|s| serde_json::from_str::<UserStampException>(s).ok())
            if let Some(stamp_exception) =
                user.stamp_exception.as_deref().and_then(|s| serde_json::from_str::<UserStampException>(s).ok())
            {
                let current_route = match request.route().and_then(|r| r.name) {
                    Some(name) => name,
@@ -317,7 +337,11 @@ impl<'a, 'r> FromRequest<'a, 'r> for Headers {
            }
        }

        Outcome::Success(Headers { host, device, user })
        Outcome::Success(Headers {
            host,
            device,
            user,
        })
    }
}

@@ -330,9 +354,9 @@ pub struct OrgHeaders {
    pub org_id: String,
}

// org_id is usually the second param ("/organizations/<org_id>")
// But there are cases where it is located in a query value.
// First check the param, if this is not a valid uuid, we will try the query value.
// org_id is usually the second path param ("/organizations/<org_id>"),
// but there are cases where it is a query value.
// First check the path, if this is not a valid uuid, try the query values.
fn get_org_id(request: &Request) -> Option<String> {
    if let Some(Ok(org_id)) = request.get_param::<String>(1) {
        if uuid::Uuid::parse_str(&org_id).is_ok() {
@@ -429,19 +453,19 @@ impl<'a, 'r> FromRequest<'a, 'r> for AdminHeaders {
    }
}

impl Into<Headers> for AdminHeaders {
    fn into(self) -> Headers {
impl From<AdminHeaders> for Headers {
    fn from(h: AdminHeaders) -> Headers {
        Headers {
            host: self.host,
            device: self.device,
            user: self.user,
            host: h.host,
            device: h.device,
            user: h.user,
        }
    }
}
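
Replacing `impl Into<Headers>` with `impl From<AdminHeaders> for Headers` follows the standard-library guidance: implement `From`, and the blanket impl provides `Into` for free (clippy flags the reverse as `from_over_into`). With stripped-down stand-in types:

```rust
// Stand-in types for illustration; the real structs carry more fields.
struct AdminHeaders { host: String }
struct Headers { host: String }

impl From<AdminHeaders> for Headers {
    fn from(h: AdminHeaders) -> Headers {
        Headers { host: h.host }
    }
}

fn main() {
    let admin = AdminHeaders { host: "example.com".into() };
    let _explicit: Headers = Headers::from(admin);
    // The reciprocal Into comes from the blanket impl in core:
    let admin2 = AdminHeaders { host: "example.org".into() };
    let _via_into: Headers = admin2.into();
}
```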

// col_id is usually the forth param ("/organizations/<org_id>/collections/<col_id>")
// But there cloud be cases where it is located in a query value.
// First check the param, if this is not a valid uuid, we will try the query value.
// col_id is usually the fourth path param ("/organizations/<org_id>/collections/<col_id>"),
// but there could be cases where it is a query value.
// First check the path, if this is not a valid uuid, try the query values.
fn get_col_id(request: &Request) -> Option<String> {
    if let Some(Ok(col_id)) = request.get_param::<String>(3) {
        if uuid::Uuid::parse_str(&col_id).is_ok() {
@@ -484,8 +508,12 @@ impl<'a, 'r> FromRequest<'a, 'r> for ManagerHeaders {
    _ => err_handler!("Error getting DB"),
};

if !headers.org_user.access_all {
    match CollectionUser::find_by_collection_and_user(&col_id, &headers.org_user.user_uuid, &conn) {
if !headers.org_user.has_full_access() {
    match CollectionUser::find_by_collection_and_user(
        &col_id,
        &headers.org_user.user_uuid,
        &conn,
    ) {
        Some(_) => (),
        None => err_handler!("The current user isn't a manager for this collection"),
    }
@@ -508,12 +536,12 @@ impl<'a, 'r> FromRequest<'a, 'r> for ManagerHeaders {
    }
}

impl Into<Headers> for ManagerHeaders {
    fn into(self) -> Headers {
impl From<ManagerHeaders> for Headers {
    fn from(h: ManagerHeaders) -> Headers {
        Headers {
            host: self.host,
            device: self.device,
            user: self.user,
            host: h.host,
            device: h.device,
            user: h.user,
        }
    }
}
@@ -550,12 +578,12 @@ impl<'a, 'r> FromRequest<'a, 'r> for ManagerHeadersLoose {
    }
}

impl Into<Headers> for ManagerHeadersLoose {
    fn into(self) -> Headers {
impl From<ManagerHeadersLoose> for Headers {
    fn from(h: ManagerHeadersLoose) -> Headers {
        Headers {
            host: self.host,
            device: self.device,
            user: self.user,
            host: h.host,
            device: h.device,
            user: h.user,
        }
    }
}
@@ -615,10 +643,10 @@ impl<'a, 'r> FromRequest<'a, 'r> for ClientIp {
    None
};

let ip = ip
    .or_else(|| req.remote().map(|r| r.ip()))
    .unwrap_or_else(|| "0.0.0.0".parse().unwrap());
let ip = ip.or_else(|| req.remote().map(|r| r.ip())).unwrap_or_else(|| "0.0.0.0".parse().unwrap());

Outcome::Success(ClientIp { ip })
Outcome::Success(ClientIp {
    ip,
})
}
}

src/config.rs (134 changes)
@@ -2,6 +2,7 @@ use std::process::exit;
use std::sync::RwLock;

use once_cell::sync::Lazy;
use regex::Regex;
use reqwest::Url;

use crate::{
@@ -22,6 +23,21 @@ pub static CONFIG: Lazy<Config> = Lazy::new(|| {
    })
});

static PRIVACY_REGEX: Lazy<Regex> = Lazy::new(|| Regex::new(r"[\w]").unwrap());
const PRIVACY_CONFIG: &[&str] = &[
    "allowed_iframe_ancestors",
    "database_url",
    "domain_origin",
    "domain_path",
    "domain",
    "helo_name",
    "org_creation_users",
    "signups_domains_whitelist",
    "smtp_from",
    "smtp_host",
    "smtp_username",
];
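
A note on how the masking works: `PRIVACY_REGEX` matches every word character, and the replacement `${1}*` references a capture group that never matches, so it expands to just `*`; separators like `@` and `.` stay visible, which keeps the shape of a value recognizable in support output without revealing it:

```rust
use regex::Regex;

fn main() {
    let privacy_regex = Regex::new(r"[\w]").unwrap();
    // "${1}" expands to the empty string (no group 1 exists), so each
    // word character is effectively replaced by a single '*'.
    let masked = privacy_regex.replace_all("admin@example.com", "${1}*");
    assert_eq!(masked, "*****@*******.***");
}
```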

pub type Pass = String;

macro_rules! make_config {
@@ -52,6 +68,7 @@ macro_rules! make_config {
}

impl ConfigBuilder {
    #[allow(clippy::field_reassign_with_default)]
    fn from_env() -> Self {
        match dotenv::from_path(".env") {
            Ok(_) => (),
@@ -196,9 +213,38 @@ macro_rules! make_config {
    }, )+
]}, )+ ])
}

pub fn get_support_json(&self) -> serde_json::Value {
    let cfg = {
        let inner = &self.inner.read().unwrap();
        inner.config.clone()
    };

    json!({ $($(
        stringify!($name): make_config!{ @supportstr $name, cfg.$name, $ty, $none_action },
    )+)+ })
}
}
};

// Support string print
( @supportstr $name:ident, $value:expr, Pass, option ) => { $value.as_ref().map(|_| String::from("***")) }; // Optional pass, we map to an Option<String> with "***"
( @supportstr $name:ident, $value:expr, Pass, $none_action:ident ) => { String::from("***") }; // Required pass, we return "***"
( @supportstr $name:ident, $value:expr, $ty:ty, option ) => { // Optional other value, we return as is or convert to string to apply the privacy config
    if PRIVACY_CONFIG.contains(&stringify!($name)) {
        json!($value.as_ref().map(|x| PRIVACY_REGEX.replace_all(&x.to_string(), "${1}*").to_string()))
    } else {
        json!($value)
    }
};
( @supportstr $name:ident, $value:expr, $ty:ty, $none_action:ident ) => { // Required other value, we return as is or convert to string to apply the privacy config
    if PRIVACY_CONFIG.contains(&stringify!($name)) {
        json!(PRIVACY_REGEX.replace_all(&$value.to_string(), "${1}*").to_string())
    } else {
        json!($value)
    }
};

// Group or empty string
( @show ) => { "" };
( @show $lit:literal ) => { $lit };
@@ -253,6 +299,8 @@ make_config! {
    icon_cache_folder: String, false, auto, |c| format!("{}/{}", c.data_folder, "icon_cache");
    /// Attachments folder
    attachments_folder: String, false, auto, |c| format!("{}/{}", c.data_folder, "attachments");
    /// Sends folder
    sends_folder: String, false, auto, |c| format!("{}/{}", c.data_folder, "sends");
    /// Templates folder
    templates_folder: String, false, auto, |c| format!("{}/{}", c.data_folder, "templates");
    /// Session JWT key
@@ -268,6 +316,17 @@ make_config! {
    /// Websocket port
    websocket_port: u16, false, def, 3012;
},
jobs {
    /// Job scheduler poll interval |> How often the job scheduler thread checks for jobs to run.
    /// Set to 0 to globally disable scheduled jobs.
    job_poll_interval_ms: u64, false, def, 30_000;
    /// Send purge schedule |> Cron schedule of the job that checks for Sends past their deletion date.
    /// Defaults to hourly. Set blank to disable this job.
    send_purge_schedule: String, false, def, "0 5 * * * *".to_string();
    /// Trash purge schedule |> Cron schedule of the job that checks for trashed items to delete permanently.
    /// Defaults to daily. Set blank to disable this job.
    trash_purge_schedule: String, false, def, "0 5 0 * * *".to_string();
},
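
These schedules use the six-field cron format (seconds first), so "0 5 * * * *" fires at five minutes past every hour and "0 5 0 * * *" once a day at 00:05. A sketch of how such a schedule is typically driven, assuming the job_scheduler crate — the actual wiring in the project may differ:

```rust
// Sketch under the assumption that the job_scheduler crate is used.
use job_scheduler::{Job, JobScheduler};
use std::time::Duration;

fn main() {
    let mut sched = JobScheduler::new();
    // Six-field cron: sec min hour day-of-month month day-of-week.
    sched.add(Job::new("0 5 * * * *".parse().unwrap(), || {
        println!("purging Sends past their deletion date...");
    }));
    loop {
        sched.tick(); // run any jobs that became due
        std::thread::sleep(Duration::from_millis(30_000)); // poll interval
    }
}
```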

/// General settings
settings {
@@ -291,11 +350,16 @@ make_config! {
    /// Per-organization attachment limit (KB) |> Limit in kilobytes for an organization's attachments, once the limit is exceeded it won't be possible to upload more
    org_attachment_limit: i64, true, option;

    /// Trash auto-delete days |> Number of days to wait before auto-deleting a trashed item.
    /// If unset, trashed items are not auto-deleted. This setting applies globally, so make
    /// sure to inform all users of any changes to this setting.
    trash_auto_delete_days: i64, true, option;

    /// Disable icon downloads |> Set to true to disable icon downloading, this would still serve icons from
    /// $ICON_CACHE_FOLDER, but it won't produce any external network request. Needs to set $ICON_CACHE_TTL to 0,
    /// otherwise it will delete them and they won't be downloaded again.
    disable_icon_download: bool, true, def, false;
    /// Allow new signups |> Controls whether new users can register. Users can be invited by the bitwarden_rs admin even if this is disabled
    /// Allow new signups |> Controls whether new users can register. Users can be invited by the vaultwarden admin even if this is disabled
    signups_allowed: bool, true, def, true;
    /// Require email verification on signups. This will prevent logins from succeeding until the address has been verified
    signups_verify: bool, true, def, false;
@@ -321,7 +385,7 @@ make_config! {
    admin_token: Pass, true, option;

    /// Invitation organization name |> Name shown in the invitation emails that don't come from a specific organization
    invitation_org_name: String, true, def, "Bitwarden_RS".to_string();
    invitation_org_name: String, true, def, "Vaultwarden".to_string();
},

/// Advanced settings
@@ -370,7 +434,7 @@ make_config! {
    /// Log level
    log_level: String, false, def, "Info".to_string();

    /// Enable DB WAL |> Turning this off might lead to worse performance, but might help if using bitwarden_rs on some exotic filesystems,
    /// Enable DB WAL |> Turning this off might lead to worse performance, but might help if using vaultwarden on some exotic filesystems,
    /// that do not support WAL. Please make sure you read the project wiki on the topic before changing this setting.
    enable_db_wal: bool, false, def, true;

@@ -425,7 +489,7 @@ make_config! {
    /// From Address
    smtp_from: String, true, def, String::new();
    /// From Name
    smtp_from_name: String, true, def, "Bitwarden_RS".to_string();
    smtp_from_name: String, true, def, "Vaultwarden".to_string();
    /// Username
    smtp_username: String, true, option;
    /// Password
@@ -458,21 +522,19 @@ make_config! {
}

fn validate_config(cfg: &ConfigItems) -> Result<(), Error> {

    // Validate connection URL is valid and DB feature is enabled
    DbConnType::from_url(&cfg.database_url)?;

    let limit = 256;
    if cfg.database_max_conns < 1 || cfg.database_max_conns > limit {
        err!(format!(
            "`DATABASE_MAX_CONNS` contains an invalid value. Ensure it is between 1 and {}.",
            limit,
        ));
        err!(format!("`DATABASE_MAX_CONNS` contains an invalid value. Ensure it is between 1 and {}.", limit,));
    }

    let dom = cfg.domain.to_lowercase();
    if !dom.starts_with("http://") && !dom.starts_with("https://") {
        err!("DOMAIN variable needs to contain the protocol (http, https). Use 'http[s]://bw.example.com' instead of 'bw.example.com'");
        err!(
            "DOMAIN variable needs to contain the protocol (http, https). Use 'http[s]://bw.example.com' instead of 'bw.example.com'"
        );
    }

    let whitelist = &cfg.signups_domains_whitelist;
@@ -481,10 +543,10 @@ fn validate_config(cfg: &ConfigItems) -> Result<(), Error> {
    }

    let org_creation_users = cfg.org_creation_users.trim().to_lowercase();
    if !(org_creation_users.is_empty() || org_creation_users == "all" || org_creation_users == "none") {
        if org_creation_users.split(',').any(|u| !u.contains('@')) {
            err!("`ORG_CREATION_USERS` contains invalid email addresses");
        }
    if !(org_creation_users.is_empty() || org_creation_users == "all" || org_creation_users == "none")
        && org_creation_users.split(',').any(|u| !u.contains('@'))
    {
        err!("`ORG_CREATION_USERS` contains invalid email addresses");
    }

    if let Some(ref token) = cfg.admin_token {
@@ -510,6 +572,10 @@ fn validate_config(cfg: &ConfigItems) -> Result<(), Error> {
    err!("Both `SMTP_HOST` and `SMTP_FROM` need to be set for email support")
}

if cfg.smtp_host.is_some() && !cfg.smtp_from.contains('@') {
    err!("SMTP_FROM does not contain a mandatory @ sign")
}

if cfg.smtp_username.is_some() != cfg.smtp_password.is_some() {
    err!("Both `SMTP_USERNAME` and `SMTP_PASSWORD` need to be set to enable email authentication")
}
@@ -529,7 +595,6 @@ fn validate_config(cfg: &ConfigItems) -> Result<(), Error> {

// Check if the icon blacklist regex is valid
if let Some(ref r) = cfg.icon_blacklist_regex {
    use regex::Regex;
    let validate_regex = Regex::new(&r);
    match validate_regex {
        Ok(_) => (),
@@ -577,7 +642,12 @@ impl Config {
    validate_config(&config)?;

    Ok(Config {
        inner: RwLock::new(Inner { templates: load_templates(&config.templates_folder), config, _env, _usr }),
        inner: RwLock::new(Inner {
            templates: load_templates(&config.templates_folder),
            config,
            _env,
            _usr,
        }),
    })
}

@@ -650,7 +720,7 @@ impl Config {
/// Tests whether the specified user is allowed to create an organization.
pub fn is_org_creation_allowed(&self, email: &str) -> bool {
    let users = self.org_creation_users();
    if users == "" || users == "all" {
    if users.is_empty() || users == "all" {
        true
    } else if users == "none" {
        false
@@ -704,8 +774,10 @@ impl Config {
    let akey_s = data_encoding::BASE64.encode(&akey);

    // Save the new value
    let mut builder = ConfigBuilder::default();
    builder._duo_akey = Some(akey_s.clone());
    let builder = ConfigBuilder {
        _duo_akey: Some(akey_s.clone()),
        ..Default::default()
    };
    self.update_config_partial(builder).ok();

    akey_s
@@ -796,9 +868,7 @@ fn case_helper<'reg, 'rc>(
    rc: &mut RenderContext<'reg, 'rc>,
    out: &mut dyn Output,
) -> HelperResult {
    let param = h
        .param(0)
        .ok_or_else(|| RenderError::new("Param not found for helper \"case\""))?;
    let param = h.param(0).ok_or_else(|| RenderError::new("Param not found for helper \"case\""))?;
    let value = param.value().clone();

    if h.params().iter().skip(1).any(|x| x.value() == &value) {
@@ -815,18 +885,18 @@ fn js_escape_helper<'reg, 'rc>(
    _rc: &mut RenderContext<'reg, 'rc>,
    out: &mut dyn Output,
) -> HelperResult {
    let param = h
        .param(0)
        .ok_or_else(|| RenderError::new("Param not found for helper \"js_escape\""))?;
    let param = h.param(0).ok_or_else(|| RenderError::new("Param not found for helper \"js_escape\""))?;

    let value = param
        .value()
        .as_str()
        .ok_or_else(|| RenderError::new("Param for helper \"js_escape\" is not a String"))?;
    let no_quote = h.param(1).is_some();

    let escaped_value = value.replace('\\', "").replace('\'', "\\x22").replace('\"', "\\x27");
    let quoted_value = format!("&quot;{}&quot;", escaped_value);
    let value =
        param.value().as_str().ok_or_else(|| RenderError::new("Param for helper \"js_escape\" is not a String"))?;

    out.write(&quoted_value)?;
    let mut escaped_value = value.replace('\\', "").replace('\'', "\\x22").replace('\"', "\\x27");
    if !no_quote {
        escaped_value = format!("&quot;{}&quot;", escaped_value);
    }

    out.write(&escaped_value)?;
    Ok(())
}

@@ -47,9 +47,7 @@ pub fn get_random_64() -> Vec<u8> {
pub fn get_random(mut array: Vec<u8>) -> Vec<u8> {
    use ring::rand::{SecureRandom, SystemRandom};

    SystemRandom::new()
        .fill(&mut array)
        .expect("Error generating random values");
    SystemRandom::new().fill(&mut array).expect("Error generating random values");

    array
}
@@ -67,7 +65,7 @@ pub fn generate_token(token_size: u32) -> Result<String, Error> {
    // token of fixed width, left-padding with 0 as needed.
    use rand::{thread_rng, Rng};
    let mut rng = thread_rng();
    let number: u64 = rng.gen_range(low, high);
    let number: u64 = rng.gen_range(low..high);
    let token = format!("{:0size$}", number, size = token_size as usize);

Ok(token)
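
`gen_range` taking a single range argument (`low..high`) instead of two arguments is the rand 0.8 API change. A runnable sketch of the zero-padded token the comment above describes — the bounds here are illustrative assumptions, not the project's exact values:

```rust
// Sketch assuming rand 0.8.
use rand::{thread_rng, Rng};

fn generate_token(token_size: usize) -> String {
    // token_size must stay below 20 to avoid u64 overflow in pow().
    let high = 10u64.pow(token_size as u32); // exclusive upper bound
    let number: u64 = thread_rng().gen_range(0..high);
    // Left-pad with zeros to the requested fixed width.
    format!("{:0size$}", number, size = token_size)
}

fn main() {
    let token = generate_token(6);
    assert_eq!(token.len(), 6);
    assert!(token.chars().all(|c| c.is_ascii_digit()));
}
```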

src/db/mod.rs (100 changes)
@@ -1,6 +1,3 @@
use std::process::Command;

use chrono::prelude::*;
use diesel::r2d2::{ConnectionManager, Pool, PooledConnection};
use rocket::{
    http::Status,
@@ -25,7 +22,6 @@ pub mod __mysql_schema;
#[path = "schemas/postgresql/schema.rs"]
pub mod __postgresql_schema;

// This is used to generate the main DbConn and DbPool enums, which contain one variant for each database supported
macro_rules! generate_connections {
    ( $( $name:ident: $ty:ty ),+ ) => {
@@ -37,6 +33,7 @@ macro_rules! generate_connections {
        pub enum DbConn { $( #[cfg($name)] $name(PooledConnection<ConnectionManager< $ty >>), )+ }

        #[allow(non_camel_case_types)]
        #[derive(Clone)]
        pub enum DbPool { $( #[cfg($name)] $name(Pool<ConnectionManager< $ty >>), )+ }

        impl DbPool {
@@ -109,7 +106,6 @@ impl DbConnType {
    }
}

#[macro_export]
macro_rules! db_run {
    // Same for all dbs
@@ -124,26 +120,44 @@ macro_rules! db_run {
    $($(
        #[cfg($db)]
        crate::db::DbConn::$db(ref $conn) => {
            paste::paste! {
                #[allow(unused)] use crate::db::[<__ $db _schema>]::{self as schema, *};
                #[allow(unused)] use [<__ $db _model>]::*;
                #[allow(unused)] use crate::db::FromDb;
            }
            $body
        },
    )+)+
    }
};

// Same for all dbs
( @raw $conn:ident: $body:block ) => {
    db_run! { @raw $conn: sqlite, mysql, postgresql $body }
};

// Different code for each db
( @raw $conn:ident: $( $($db:ident),+ $body:block )+ ) => {
    #[allow(unused)] use diesel::prelude::*;
    #[allow(unused_variables)]
    match $conn {
        $($(
            #[cfg($db)]
            crate::db::DbConn::$db(ref $conn) => {
                $body
            },
        )+)+
    }
};
}

pub trait FromDb {
    type Output;
    #[allow(clippy::wrong_self_convention)]
    fn from_db(self) -> Self::Output;
}

// For each struct eg. Cipher, we create a CipherDb inside a module named __$db_model (where $db is sqlite, mysql or postgresql),
// to implement the Diesel traits. We also provide methods to convert between them and the basic structs. Later, that module will be auto imported when using db_run!
#[macro_export]
macro_rules! db_object {
@@ -153,10 +167,10 @@ macro_rules! db_object {
    $( $( #[$field_attr:meta] )* $vis:vis $field:ident : $typ:ty ),+
    $(,)?
}
)+ ) => {
    // Create the normal struct, without attributes
    $( pub struct $name { $( /*$( #[$field_attr] )**/ $vis $field : $typ, )+ } )+

    #[cfg(sqlite)]
    pub mod __sqlite_model { $( db_object! { @db sqlite | $( #[$attr] )* | $name | $( $( #[$field_attr] )* $field : $typ ),+ } )+ }
    #[cfg(mysql)]
@@ -177,7 +191,7 @@ macro_rules! db_object {
    )+ }

    impl [<$name Db>] {
        #[allow(clippy::wrong_self_convention)]
        #[inline(always)] pub fn to_db(x: &super::$name) -> Self { Self { $( $field: x.$field.clone(), )+ } }
    }

@@ -202,23 +216,36 @@ macro_rules! db_object {
// Reexport the models, needs to be after the macros are defined so it can access them
pub mod models;

/// Creates a back-up of the database using sqlite3
pub fn backup_database() -> Result<(), Error> {
    use std::path::Path;
    let db_url = CONFIG.database_url();
    let db_path = Path::new(&db_url).parent().unwrap();
/// Creates a back-up of the sqlite database
/// MySQL/MariaDB and PostgreSQL are not supported.
pub fn backup_database(conn: &DbConn) -> Result<(), Error> {
    db_run! {@raw conn:
        postgresql, mysql {
            err!("PostgreSQL and MySQL/MariaDB do not support this backup feature");
        }
        sqlite {
            use std::path::Path;
            let db_url = CONFIG.database_url();
            let db_path = Path::new(&db_url).parent().unwrap().to_string_lossy();
            let file_date = chrono::Utc::now().format("%Y%m%d_%H%M%S").to_string();
            diesel::sql_query(format!("VACUUM INTO '{}/db_{}.sqlite3'", db_path, file_date)).execute(conn)?;
            Ok(())
        }
    }
}

let now: DateTime<Utc> = Utc::now();
let file_date = now.format("%Y%m%d").to_string();
let backup_command: String = format!("{}{}{}", ".backup 'db_", file_date, ".sqlite3'");

Command::new("sqlite3")
    .current_dir(db_path)
    .args(&["db.sqlite3", &backup_command])
    .output()
    .expect("Can't open database, sqlite3 is not available, make sure it's installed and available on the PATH");

Ok(())
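
The new implementation above relies on SQLite's `VACUUM INTO` (available since SQLite 3.27), which writes a compacted copy of the live database to a new file through the existing connection, removing the dependency on an external `sqlite3` binary. The same statement outside Diesel, sketched with the rusqlite crate — an assumption for illustration; the project itself goes through Diesel as shown above:

```rust
// Minimal illustration; any SQLite >= 3.27 connection can run this.
use rusqlite::Connection;

fn backup(db: &str, dest: &str) -> rusqlite::Result<()> {
    let conn = Connection::open(db)?;
    // VACUUM INTO writes a compacted copy of the live database to a
    // new file, replacing the old shell-out to the sqlite3 CLI.
    conn.execute_batch(&format!("VACUUM INTO '{}'", dest))?;
    Ok(())
}

fn main() -> rusqlite::Result<()> {
    backup("db.sqlite3", "db_backup.sqlite3")
}
```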

/// Get the SQL Server version
pub fn get_sql_server_version(conn: &DbConn) -> String {
    db_run! {@raw conn:
        postgresql, mysql {
            no_arg_sql_function!(version, diesel::sql_types::Text);
            diesel::select(version).get_result::<String>(conn).unwrap_or_else(|_| "Unknown".to_string())
        }
        sqlite {
            no_arg_sql_function!(sqlite_version, diesel::sql_types::Text);
            diesel::select(sqlite_version).get_result::<String>(conn).unwrap_or_else(|_| "Unknown".to_string())
        }
    }
}

/// Attempts to retrieve a single connection from the managed database pool. If
@@ -259,10 +286,9 @@ mod sqlite_migrations {

    use diesel::{Connection, RunQueryDsl};
    // Make sure the database is up to date (create if it doesn't exist, or run the migrations)
    let connection =
        diesel::sqlite::SqliteConnection::establish(&crate::CONFIG.database_url())?;
    let connection = diesel::sqlite::SqliteConnection::establish(&crate::CONFIG.database_url())?;
    // Disable Foreign Key Checks during migration

    // Scoped to a connection.
    diesel::sql_query("PRAGMA foreign_keys = OFF")
        .execute(&connection)
@@ -270,9 +296,7 @@ mod sqlite_migrations {

    // Turn on WAL in SQLite
    if crate::CONFIG.enable_db_wal() {
        diesel::sql_query("PRAGMA journal_mode=wal")
            .execute(&connection)
            .expect("Failed to turn on WAL");
        diesel::sql_query("PRAGMA journal_mode=wal").execute(&connection).expect("Failed to turn on WAL");
    }

    embedded_migrations::run_with_output(&connection, &mut std::io::stdout())?;
@@ -288,8 +312,7 @@ mod mysql_migrations {
    pub fn run_migrations() -> Result<(), super::Error> {
        use diesel::{Connection, RunQueryDsl};
        // Make sure the database is up to date (create if it doesn't exist, or run the migrations)
        let connection =
            diesel::mysql::MysqlConnection::establish(&crate::CONFIG.database_url())?;
        let connection = diesel::mysql::MysqlConnection::establish(&crate::CONFIG.database_url())?;
        // Disable Foreign Key Checks during migration

        // Scoped to a connection/session.
@@ -310,10 +333,9 @@ mod postgresql_migrations {
    pub fn run_migrations() -> Result<(), super::Error> {
        use diesel::{Connection, RunQueryDsl};
        // Make sure the database is up to date (create if it doesn't exist, or run the migrations)
        let connection =
            diesel::pg::PgConnection::establish(&crate::CONFIG.database_url())?;
        let connection = diesel::pg::PgConnection::establish(&crate::CONFIG.database_url())?;
        // Disable Foreign Key Checks during migration

        // FIXME: Per https://www.postgresql.org/docs/12/sql-set-constraints.html,
        // "SET CONSTRAINTS sets the behavior of constraint checking within the
        // current transaction", so this setting probably won't take effect for

@@ -4,7 +4,7 @@ use super::Cipher;
use crate::CONFIG;

db_object! {
    #[derive(Debug, Identifiable, Queryable, Insertable, Associations, AsChangeset)]
    #[derive(Identifiable, Queryable, Insertable, Associations, AsChangeset)]
    #[table_name = "attachments"]
    #[changeset_options(treat_none_as_null="true")]
    #[belongs_to(super::Cipher, foreign_key = "cipher_uuid")]
@@ -59,7 +59,6 @@ use crate::error::MapResult;

/// Database methods
impl Attachment {
    pub fn save(&self, conn: &DbConn) -> EmptyResult {
        db_run! { conn:
            sqlite, mysql {

@@ -1,20 +1,15 @@
use chrono::{NaiveDateTime, Utc};
use chrono::{Duration, NaiveDateTime, Utc};
use serde_json::Value;

use crate::CONFIG;

use super::{
    Attachment,
    CollectionCipher,
    Favorite,
    FolderCipher,
    Organization,
    User,
    UserOrgStatus,
    UserOrgType,
    Attachment, CollectionCipher, Favorite, FolderCipher, Organization, User, UserOrgStatus, UserOrgType,
    UserOrganization,
};

db_object! {
    #[derive(Debug, Identifiable, Queryable, Insertable, Associations, AsChangeset)]
    #[derive(Identifiable, Queryable, Insertable, Associations, AsChangeset)]
    #[table_name = "ciphers"]
    #[changeset_options(treat_none_as_null="true")]
    #[belongs_to(User, foreign_key = "user_uuid")]
@@ -83,46 +78,54 @@ impl Cipher {
    use crate::util::format_date;

    let attachments = Attachment::find_by_cipher(&self.uuid, conn);
    let attachments_json: Vec<Value> = attachments.iter().map(|c| c.to_json(host)).collect();
    // When there are no attachments use null instead of an empty array
    let attachments_json = if attachments.is_empty() {
        Value::Null
    } else {
        attachments.iter().map(|c| c.to_json(host)).collect()
};
|
||||
|
||||
let fields_json = self.fields.as_ref().and_then(|s| serde_json::from_str(s).ok()).unwrap_or(Value::Null);
|
||||
let password_history_json = self.password_history.as_ref().and_then(|s| serde_json::from_str(s).ok()).unwrap_or(Value::Null);
|
||||
let password_history_json =
|
||||
self.password_history.as_ref().and_then(|s| serde_json::from_str(s).ok()).unwrap_or(Value::Null);
|
||||
|
||||
let (read_only, hide_passwords) =
|
||||
match self.get_access_restrictions(&user_uuid, conn) {
|
||||
Some((ro, hp)) => (ro, hp),
|
||||
None => {
|
||||
error!("Cipher ownership assertion failure");
|
||||
(true, true)
|
||||
},
|
||||
};
|
||||
let (read_only, hide_passwords) = match self.get_access_restrictions(&user_uuid, conn) {
|
||||
Some((ro, hp)) => (ro, hp),
|
||||
None => {
|
||||
error!("Cipher ownership assertion failure");
|
||||
(true, true)
|
||||
}
|
||||
};
|
||||
|
||||
// Get the data or a default empty value to avoid issues with the mobile apps
|
||||
let mut data_json: Value = serde_json::from_str(&self.data).unwrap_or_else(|_| json!({
|
||||
"Fields":null,
|
||||
"Name": self.name,
|
||||
"Notes":null,
|
||||
"Password":null,
|
||||
"PasswordHistory":null,
|
||||
"PasswordRevisionDate":null,
|
||||
"Response":null,
|
||||
"Totp":null,
|
||||
"Uris":null,
|
||||
"Username":null
|
||||
}));
|
||||
// Get the type_data or a default to an empty json object '{}'.
|
||||
// If not passing an empty object, mobile clients will crash.
|
||||
let mut type_data_json: Value = serde_json::from_str(&self.data).unwrap_or_else(|_| json!({}));
|
||||
|
||||
// TODO: ******* Backwards compat start **********
|
||||
// To remove backwards compatibility, just remove this entire section
|
||||
// and remove the compat code from ciphers::update_cipher_from_data
|
||||
if self.atype == 1 && data_json["Uris"].is_array() {
|
||||
let uri = data_json["Uris"][0]["Uri"].clone();
|
||||
data_json["Uri"] = uri;
|
||||
// NOTE: This was marked as *Backwards Compatibilty Code*, but as of January 2021 this is still being used by upstream
|
||||
// Set the first element of the Uris array as Uri, this is needed several (mobile) clients.
|
||||
if self.atype == 1 {
|
||||
if type_data_json["Uris"].is_array() {
|
||||
let uri = type_data_json["Uris"][0]["Uri"].clone();
|
||||
type_data_json["Uri"] = uri;
|
||||
} else {
|
||||
// Upstream always has an Uri key/value
|
||||
type_data_json["Uri"] = Value::Null;
|
||||
}
|
||||
}
|
||||
// TODO: ******* Backwards compat end **********
|
||||
|
||||
// Clone the type_data and add some default value.
|
||||
let mut data_json = type_data_json.clone();
|
||||
|
||||
// NOTE: This was marked as *Backwards Compatibilty Code*, but as of January 2021 this is still being used by upstream
|
||||
// data_json should always contain the following keys with every atype
|
||||
data_json["Fields"] = json!(fields_json);
|
||||
data_json["Name"] = json!(self.name);
|
||||
data_json["Notes"] = json!(self.notes);
|
||||
data_json["PasswordHistory"] = json!(password_history_json);
|
||||
|
||||
// There are three types of cipher response models in upstream
|
||||
// Bitwarden: "cipherMini", "cipher", and "cipherDetails" (in order
|
||||
// of increasing level of detail). bitwarden_rs currently only
|
||||
// of increasing level of detail). vaultwarden currently only
|
||||
// supports the "cipherDetails" type, though it seems like the
|
||||
// Bitwarden clients will ignore extra fields.
|
||||
//
|
||||
@@ -137,6 +140,8 @@ impl Cipher {
|
||||
"Favorite": self.is_favorite(&user_uuid, conn),
|
||||
"OrganizationId": self.organization_uuid,
|
||||
"Attachments": attachments_json,
|
||||
// We have UseTotp set to true by default within the Organization model.
|
||||
// This variable together with UsersGetPremium is used to show or hide the TOTP counter.
|
||||
"OrganizationUseTotp": true,
|
||||
|
||||
// This field is specific to the cipherDetails type.
|
||||
@@ -155,6 +160,12 @@ impl Cipher {
|
||||
"ViewPassword": !hide_passwords,
|
||||
|
||||
"PasswordHistory": password_history_json,
|
||||
|
||||
// All Cipher types are included by default as null, but only the matching one will be populated
|
||||
"Login": null,
|
||||
"SecureNote": null,
|
||||
"Card": null,
|
||||
"Identity": null,
|
||||
});
|
||||
|
||||
let key = match self.atype {
|
||||
@@ -165,7 +176,7 @@ impl Cipher {
|
||||
_ => panic!("Wrong type"),
|
||||
};
|
||||
|
||||
json_object[key] = data_json;
|
||||
json_object[key] = type_data_json;
|
||||
json_object
|
||||
}
|
||||
|
||||
@@ -179,12 +190,10 @@ impl Cipher {
|
||||
None => {
|
||||
// Belongs to Organization, need to update affected users
|
||||
if let Some(ref org_uuid) = self.organization_uuid {
|
||||
UserOrganization::find_by_cipher_and_org(&self.uuid, &org_uuid, conn)
|
||||
.iter()
|
||||
.for_each(|user_org| {
|
||||
User::update_uuid_revision(&user_org.user_uuid, conn);
|
||||
user_uuids.push(user_org.user_uuid.clone())
|
||||
});
|
||||
UserOrganization::find_by_cipher_and_org(&self.uuid, &org_uuid, conn).iter().for_each(|user_org| {
|
||||
User::update_uuid_revision(&user_org.user_uuid, conn);
|
||||
user_uuids.push(user_org.user_uuid.clone())
|
||||
});
|
||||
}
|
||||
}
|
||||
};
|
||||
@@ -255,6 +264,17 @@ impl Cipher {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Purge all ciphers that are old enough to be auto-deleted.
|
||||
pub fn purge_trash(conn: &DbConn) {
|
||||
if let Some(auto_delete_days) = CONFIG.trash_auto_delete_days() {
|
||||
let now = Utc::now().naive_utc();
|
||||
let dt = now - Duration::days(auto_delete_days);
|
||||
for cipher in Self::find_deleted_before(&dt, conn) {
|
||||
cipher.delete(&conn).ok();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub fn move_to_folder(&self, folder_uuid: Option<String>, user_uuid: &str, conn: &DbConn) -> EmptyResult {
|
||||
User::update_uuid_revision(user_uuid, conn);
|
||||
|
||||
@@ -448,7 +468,10 @@ impl Cipher {
|
||||
pub fn find_owned_by_user(user_uuid: &str, conn: &DbConn) -> Vec<Self> {
|
||||
db_run! {conn: {
|
||||
ciphers::table
|
||||
.filter(ciphers::user_uuid.eq(user_uuid))
|
||||
.filter(
|
||||
ciphers::user_uuid.eq(user_uuid)
|
||||
.and(ciphers::organization_uuid.is_null())
|
||||
)
|
||||
.load::<CipherDb>(conn).expect("Error loading ciphers").from_db()
|
||||
}}
|
||||
}
|
||||
@@ -492,6 +515,15 @@ impl Cipher {
|
||||
}}
|
||||
}
|
||||
|
||||
/// Find all ciphers that were deleted before the specified datetime.
|
||||
pub fn find_deleted_before(dt: &NaiveDateTime, conn: &DbConn) -> Vec<Self> {
|
||||
db_run! {conn: {
|
||||
ciphers::table
|
||||
.filter(ciphers::deleted_at.lt(dt))
|
||||
.load::<CipherDb>(conn).expect("Error loading ciphers").from_db()
|
||||
}}
|
||||
}
|
||||
|
||||
pub fn get_collections(&self, user_id: &str, conn: &DbConn) -> Vec<String> {
|
||||
db_run! {conn: {
|
||||
ciphers_collections::table
|
||||
|
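A small hedged sketch of the cutoff arithmetic behind purge_trash above: ciphers soft-deleted before "now minus trash_auto_delete_days" become eligible for permanent removal (chrono crate assumed):

use chrono::{Duration, NaiveDateTime, Utc};

fn purge_cutoff(auto_delete_days: i64) -> NaiveDateTime {
    // Anything with deleted_at earlier than this instant gets purged.
    Utc::now().naive_utc() - Duration::days(auto_delete_days)
}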
src/db/models/collection.rs
@@ -1,9 +1,9 @@
 use serde_json::Value;

-use super::{Organization, UserOrgStatus, UserOrgType, UserOrganization, User, Cipher};
+use super::{Cipher, Organization, User, UserOrgStatus, UserOrgType, UserOrganization};

 db_object! {
-    #[derive(Debug, Identifiable, Queryable, Insertable, Associations, AsChangeset)]
+    #[derive(Identifiable, Queryable, Insertable, Associations, AsChangeset)]
     #[table_name = "collections"]
     #[belongs_to(Organization, foreign_key = "org_uuid")]
     #[primary_key(uuid)]
@@ -13,7 +13,7 @@ db_object! {
         pub name: String,
     }

-    #[derive(Debug, Identifiable, Queryable, Insertable, Associations)]
+    #[derive(Identifiable, Queryable, Insertable, Associations)]
     #[table_name = "users_collections"]
     #[belongs_to(User, foreign_key = "user_uuid")]
     #[belongs_to(Collection, foreign_key = "collection_uuid")]
@@ -25,7 +25,7 @@ db_object! {
         pub hide_passwords: bool,
     }

-    #[derive(Debug, Identifiable, Queryable, Insertable, Associations)]
+    #[derive(Identifiable, Queryable, Insertable, Associations)]
     #[table_name = "ciphers_collections"]
     #[belongs_to(Cipher, foreign_key = "cipher_uuid")]
     #[belongs_to(Collection, foreign_key = "collection_uuid")]
@@ -49,12 +49,21 @@ impl Collection {

     pub fn to_json(&self) -> Value {
         json!({
+            "ExternalId": null, // Not support by us
             "Id": self.uuid,
             "OrganizationId": self.org_uuid,
             "Name": self.name,
             "Object": "collection",
         })
     }
+
+    pub fn to_json_details(&self, user_uuid: &str, conn: &DbConn) -> Value {
+        let mut json_object = self.to_json();
+        json_object["Object"] = json!("collectionDetails");
+        json_object["ReadOnly"] = json!(!self.is_writable_by_user(user_uuid, conn));
+        json_object["HidePasswords"] = json!(self.hide_passwords_for_user(user_uuid, conn));
+        json_object
+    }
 }

 use crate::db::DbConn;
@@ -118,11 +127,9 @@ impl Collection {
     }

     pub fn update_users_revision(&self, conn: &DbConn) {
-        UserOrganization::find_by_collection_and_org(&self.uuid, &self.org_uuid, conn)
-            .iter()
-            .for_each(|user_org| {
-                User::update_uuid_revision(&user_org.user_uuid, conn);
-            });
+        UserOrganization::find_by_collection_and_org(&self.uuid, &self.org_uuid, conn).iter().for_each(|user_org| {
+            User::update_uuid_revision(&user_org.user_uuid, conn);
+        });
     }

     pub fn find_by_uuid(uuid: &str, conn: &DbConn) -> Option<Self> {
@@ -161,10 +168,7 @@ impl Collection {
     }

     pub fn find_by_organization_and_user_uuid(org_uuid: &str, user_uuid: &str, conn: &DbConn) -> Vec<Self> {
-        Self::find_by_user_uuid(user_uuid, conn)
-            .into_iter()
-            .filter(|c| c.org_uuid == org_uuid)
-            .collect()
+        Self::find_by_user_uuid(user_uuid, conn).into_iter().filter(|c| c.org_uuid == org_uuid).collect()
     }

     pub fn find_by_organization(org_uuid: &str, conn: &DbConn) -> Vec<Self> {
@@ -236,6 +240,28 @@ impl Collection {
             }
         }
     }
+
+    pub fn hide_passwords_for_user(&self, user_uuid: &str, conn: &DbConn) -> bool {
+        match UserOrganization::find_by_user_and_org(&user_uuid, &self.org_uuid, &conn) {
+            None => true, // Not in Org
+            Some(user_org) => {
+                if user_org.has_full_access() {
+                    return false;
+                }
+
+                db_run! { conn: {
+                    users_collections::table
+                        .filter(users_collections::collection_uuid.eq(&self.uuid))
+                        .filter(users_collections::user_uuid.eq(user_uuid))
+                        .filter(users_collections::hide_passwords.eq(true))
+                        .count()
+                        .first::<i64>(conn)
+                        .ok()
+                        .unwrap_or(0) != 0
+                }}
+            }
+        }
+    }
 }

 /// Database methods
@@ -253,7 +279,13 @@ impl CollectionUser {
         }}
     }

-    pub fn save(user_uuid: &str, collection_uuid: &str, read_only: bool, hide_passwords: bool, conn: &DbConn) -> EmptyResult {
+    pub fn save(
+        user_uuid: &str,
+        collection_uuid: &str,
+        read_only: bool,
+        hide_passwords: bool,
+        conn: &DbConn,
+    ) -> EmptyResult {
         User::update_uuid_revision(&user_uuid, conn);

         db_run! { conn:
@@ -343,11 +375,9 @@ impl CollectionUser {
     }

     pub fn delete_all_by_collection(collection_uuid: &str, conn: &DbConn) -> EmptyResult {
-        CollectionUser::find_by_collection(&collection_uuid, conn)
-            .iter()
-            .for_each(|collection| {
-                User::update_uuid_revision(&collection.user_uuid, conn);
-            });
+        CollectionUser::find_by_collection(&collection_uuid, conn).iter().for_each(|collection| {
+            User::update_uuid_revision(&collection.user_uuid, conn);
+        });

         db_run! { conn: {
             diesel::delete(users_collections::table.filter(users_collections::collection_uuid.eq(collection_uuid)))
@@ -364,7 +394,6 @@ impl CollectionUser {
         diesel::delete(users_collections::table.filter(
             users_collections::user_uuid.eq(user_uuid)
                 .and(users_collections::collection_uuid.eq(user.collection_uuid))
-
         ))
         .execute(conn)
         .map_res("Error removing user from collections")?;
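A hedged sketch of the to_json_details pattern added above: build the base collection JSON once, then override the object type and attach the per-user flags on the serde_json::Value (serde_json crate assumed; the permission lookups are stubbed as plain booleans here):

use serde_json::{json, Value};

fn to_json_details(base: Value, writable: bool, hide_passwords: bool) -> Value {
    let mut obj = base;
    obj["Object"] = json!("collectionDetails");
    obj["ReadOnly"] = json!(!writable);
    obj["HidePasswords"] = json!(hide_passwords);
    obj
}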
src/db/models/device.rs
@@ -4,7 +4,7 @@ use super::User;
 use crate::CONFIG;

 db_object! {
-    #[derive(Debug, Identifiable, Queryable, Insertable, Associations, AsChangeset)]
+    #[derive(Identifiable, Queryable, Insertable, Associations, AsChangeset)]
     #[table_name = "devices"]
     #[changeset_options(treat_none_as_null="true")]
     #[belongs_to(User, foreign_key = "user_uuid")]
@@ -80,8 +80,8 @@ impl Device {
         let orgmanager: Vec<_> = orgs.iter().filter(|o| o.atype == 3).map(|o| o.org_uuid.clone()).collect();

         // Create the JWT claims struct, to send to the client
-        use crate::auth::{encode_jwt, LoginJWTClaims, DEFAULT_VALIDITY, JWT_LOGIN_ISSUER};
-        let claims = LoginJWTClaims {
+        use crate::auth::{encode_jwt, LoginJwtClaims, DEFAULT_VALIDITY, JWT_LOGIN_ISSUER};
+        let claims = LoginJwtClaims {
             nbf: time_now.timestamp(),
             exp: (time_now + *DEFAULT_VALIDITY).timestamp(),
             iss: JWT_LOGIN_ISSUER.to_string(),
@@ -117,7 +117,7 @@ impl Device {
     pub fn save(&mut self, conn: &DbConn) -> EmptyResult {
         self.updated_at = Utc::now().naive_utc();

-        db_run! { conn:
+        db_run! { conn:
             sqlite, mysql {
                 crate::util::retry(
                     || diesel::replace_into(devices::table).values(DeviceDb::to_db(self)).execute(conn),
src/db/models/favorite.rs
@@ -1,7 +1,7 @@
 use super::{Cipher, User};

 db_object! {
-    #[derive(Debug, Identifiable, Queryable, Insertable, Associations)]
+    #[derive(Identifiable, Queryable, Insertable, Associations)]
     #[table_name = "favorites"]
     #[belongs_to(User, foreign_key = "user_uuid")]
     #[belongs_to(Cipher, foreign_key = "cipher_uuid")]
@@ -20,7 +20,7 @@ use crate::error::MapResult;
 impl Favorite {
     // Returns whether the specified cipher is a favorite of the specified user.
     pub fn is_favorite(cipher_uuid: &str, user_uuid: &str, conn: &DbConn) -> bool {
-        db_run!{ conn: {
+        db_run! { conn: {
             let query = favorites::table
                 .filter(favorites::cipher_uuid.eq(cipher_uuid))
                 .filter(favorites::user_uuid.eq(user_uuid))
@@ -36,19 +36,19 @@ impl Favorite {
         match (old, new) {
             (false, true) => {
                 User::update_uuid_revision(user_uuid, &conn);
-                db_run!{ conn: {
-                    diesel::insert_into(favorites::table)
-                        .values((
-                            favorites::user_uuid.eq(user_uuid),
-                            favorites::cipher_uuid.eq(cipher_uuid),
-                        ))
-                        .execute(conn)
-                        .map_res("Error adding favorite")
-                }}
+                db_run! { conn: {
+                    diesel::insert_into(favorites::table)
+                        .values((
+                            favorites::user_uuid.eq(user_uuid),
+                            favorites::cipher_uuid.eq(cipher_uuid),
+                        ))
+                        .execute(conn)
+                        .map_res("Error adding favorite")
+                }}
             }
             (true, false) => {
                 User::update_uuid_revision(user_uuid, &conn);
-                db_run!{ conn: {
+                db_run! { conn: {
                     diesel::delete(
                         favorites::table
                             .filter(favorites::user_uuid.eq(user_uuid))
@@ -59,7 +59,7 @@ impl Favorite {
                 }}
             }
             // Otherwise, the favorite status is already what it should be.
-            _ => Ok(())
+            _ => Ok(()),
         }
     }
src/db/models/folder.rs
@@ -4,7 +4,7 @@ use serde_json::Value;
 use super::{Cipher, User};

 db_object! {
-    #[derive(Debug, Identifiable, Queryable, Insertable, Associations, AsChangeset)]
+    #[derive(Identifiable, Queryable, Insertable, Associations, AsChangeset)]
     #[table_name = "folders"]
     #[belongs_to(User, foreign_key = "user_uuid")]
     #[primary_key(uuid)]
@@ -16,7 +16,7 @@ db_object! {
         pub name: String,
     }

-    #[derive(Debug, Identifiable, Queryable, Insertable, Associations)]
+    #[derive(Identifiable, Queryable, Insertable, Associations)]
     #[table_name = "folders_ciphers"]
     #[belongs_to(Cipher, foreign_key = "cipher_uuid")]
     #[belongs_to(Folder, foreign_key = "folder_uuid")]
@@ -109,7 +109,6 @@ impl Folder {
         User::update_uuid_revision(&self.user_uuid, conn);
         FolderCipher::delete_all_by_folder(&self.uuid, &conn)?;
-
         db_run! { conn: {
             diesel::delete(folders::table.filter(folders::uuid.eq(&self.uuid)))
                 .execute(conn)
src/db/models/mod.rs
@@ -6,6 +6,7 @@ mod favorite;
 mod folder;
 mod org_policy;
 mod organization;
+mod send;
 mod two_factor;
 mod user;

@@ -17,5 +18,6 @@ pub use self::favorite::Favorite;
 pub use self::folder::{Folder, FolderCipher};
 pub use self::org_policy::{OrgPolicy, OrgPolicyType};
 pub use self::organization::{Organization, UserOrgStatus, UserOrgType, UserOrganization};
+pub use self::send::{Send, SendType};
 pub use self::two_factor::{TwoFactor, TwoFactorType};
 pub use self::user::{Invitation, User, UserStampException};
src/db/models/org_policy.rs
@@ -4,10 +4,10 @@ use crate::api::EmptyResult;
 use crate::db::DbConn;
 use crate::error::MapResult;

-use super::{Organization, UserOrgStatus};
+use super::{Organization, UserOrgStatus, UserOrgType, UserOrganization};

 db_object! {
-    #[derive(Debug, Identifiable, Queryable, Insertable, Associations, AsChangeset)]
+    #[derive(Identifiable, Queryable, Insertable, Associations, AsChangeset)]
     #[table_name = "org_policies"]
     #[belongs_to(Organization, foreign_key = "org_uuid")]
     #[primary_key(uuid)]
@@ -20,12 +20,15 @@ db_object! {
     }
 }

 #[allow(dead_code)]
-#[derive(num_derive::FromPrimitive)]
+#[derive(Copy, Clone, num_derive::FromPrimitive)]
 pub enum OrgPolicyType {
     TwoFactorAuthentication = 0,
     MasterPassword = 1,
     PasswordGenerator = 2,
+    // SingleOrg = 3, // Not currently supported.
+    // RequireSso = 4, // Not currently supported.
+    PersonalOwnership = 5,
+    DisableSend = 6,
 }

 /// Local methods
@@ -40,6 +43,10 @@ impl OrgPolicy {
         }
     }

+    pub fn has_type(&self, policy_type: OrgPolicyType) -> bool {
+        self.atype == policy_type as i32
+    }
+
     pub fn to_json(&self) -> Value {
         let data_json: Value = serde_json::from_str(&self.data).unwrap_or(Value::Null);
         json!({
@@ -163,6 +170,24 @@ impl OrgPolicy {
         }}
     }

+    /// Returns true if the user belongs to an org that has enabled the specified policy type,
+    /// and the user is not an owner or admin of that org. This is only useful for checking
+    /// applicability of policy types that have these particular semantics.
+    pub fn is_applicable_to_user(user_uuid: &str, policy_type: OrgPolicyType, conn: &DbConn) -> bool {
+        // Returns confirmed users only.
+        for policy in OrgPolicy::find_by_user(user_uuid, conn) {
+            if policy.enabled && policy.has_type(policy_type) {
+                let org_uuid = &policy.org_uuid;
+                if let Some(user) = UserOrganization::find_by_user_and_org(user_uuid, org_uuid, conn) {
+                    if user.atype < UserOrgType::Admin {
+                        return true;
+                    }
+                }
+            }
+        }
+        false
+    }
+
     /*pub fn delete_all_by_user(user_uuid: &str, conn: &DbConn) -> EmptyResult {
         db_run! { conn: {
             diesel::delete(twofactor::table.filter(twofactor::user_uuid.eq(user_uuid)))
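A hypothetical caller sketch for is_applicable_to_user above (the function name and error message are illustrative, not the project's exact handler code): route handlers can gate an action on a policy in one place.

fn check_send_allowed(user_uuid: &str, conn: &DbConn) -> EmptyResult {
    // DisableSend applies to non-admin members of any org that enabled it.
    if OrgPolicy::is_applicable_to_user(user_uuid, OrgPolicyType::DisableSend, conn) {
        err!("Due to an Enterprise Policy, you are restricted from creating or editing Sends")
    }
    Ok(())
}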
src/db/models/organization.rs
@@ -1,11 +1,11 @@
+use num_traits::FromPrimitive;
 use serde_json::Value;
 use std::cmp::Ordering;
-use num_traits::FromPrimitive;

-use super::{CollectionUser, User, OrgPolicy};
+use super::{CollectionUser, OrgPolicy, User};

 db_object! {
-    #[derive(Debug, Identifiable, Queryable, Insertable, AsChangeset)]
+    #[derive(Identifiable, Queryable, Insertable, AsChangeset)]
     #[table_name = "organizations"]
     #[primary_key(uuid)]
     pub struct Organization {
@@ -14,7 +14,7 @@ db_object! {
         pub billing_email: String,
     }

-    #[derive(Debug, Identifiable, Queryable, Insertable, AsChangeset)]
+    #[derive(Identifiable, Queryable, Insertable, AsChangeset)]
     #[table_name = "users_organizations"]
     #[primary_key(uuid)]
     pub struct UserOrganization {
@@ -35,8 +35,7 @@ pub enum UserOrgStatus {
     Confirmed = 2,
 }

-#[derive(Copy, Clone, PartialEq, Eq)]
-#[derive(num_derive::FromPrimitive)]
+#[derive(Copy, Clone, PartialEq, Eq, num_derive::FromPrimitive)]
 pub enum UserOrgType {
     Owner = 0,
     Admin = 1,
@@ -90,17 +89,11 @@ impl PartialOrd<i32> for UserOrgType {
     }

     fn gt(&self, other: &i32) -> bool {
-        match self.partial_cmp(other) {
-            Some(Ordering::Less) | Some(Ordering::Equal) => false,
-            _ => true,
-        }
+        matches!(self.partial_cmp(other), Some(Ordering::Greater))
     }

     fn ge(&self, other: &i32) -> bool {
-        match self.partial_cmp(other) {
-            Some(Ordering::Less) => false,
-            _ => true,
-        }
+        matches!(self.partial_cmp(other), Some(Ordering::Greater) | Some(Ordering::Equal))
     }
 }

@@ -119,17 +112,11 @@ impl PartialOrd<UserOrgType> for i32 {
     }

     fn lt(&self, other: &UserOrgType) -> bool {
-        match self.partial_cmp(other) {
-            Some(Ordering::Less) | None => true,
-            _ => false,
-        }
+        matches!(self.partial_cmp(other), Some(Ordering::Less) | None)
     }

     fn le(&self, other: &UserOrgType) -> bool {
-        match self.partial_cmp(other) {
-            Some(Ordering::Less) | Some(Ordering::Equal) | None => true,
-            _ => false,
-        }
+        matches!(self.partial_cmp(other), Some(Ordering::Less) | Some(Ordering::Equal) | None)
     }
 }

@@ -147,9 +134,10 @@ impl Organization {
     pub fn to_json(&self) -> Value {
         json!({
             "Id": self.uuid,
+            "Identifier": null, // not supported by us
             "Name": self.name,
-            "Seats": 10,
-            "MaxCollections": 10,
+            "Seats": 10, // The value doesn't matter, we don't check server-side
+            "MaxCollections": 10, // The value doesn't matter, we don't check server-side
             "MaxStorageGb": 10, // The value doesn't matter, we don't check server-side
             "Use2fa": true,
             "UseDirectory": false,
@@ -157,6 +145,9 @@ impl Organization {
             "UseGroups": false,
             "UseTotp": true,
+            "UsePolicies": true,
+            "UseSso": false, // We do not support SSO
             "SelfHost": true,
+            "UseApi": false, // not supported by us

             "BusinessName": null,
             "BusinessAddress1": null,
@@ -198,11 +189,9 @@ use crate::error::MapResult;
 /// Database methods
 impl Organization {
     pub fn save(&self, conn: &DbConn) -> EmptyResult {
-        UserOrganization::find_by_org(&self.uuid, conn)
-            .iter()
-            .for_each(|user_org| {
-                User::update_uuid_revision(&user_org.user_uuid, conn);
-            });
+        UserOrganization::find_by_org(&self.uuid, conn).iter().for_each(|user_org| {
+            User::update_uuid_revision(&user_org.user_uuid, conn);
+        });

         db_run! { conn:
             sqlite, mysql {
@@ -244,7 +233,6 @@ impl Organization {
         UserOrganization::delete_all_by_organization(&self.uuid, &conn)?;
         OrgPolicy::delete_all_by_organization(&self.uuid, &conn)?;
-
         db_run! { conn: {
             diesel::delete(organizations::table.filter(organizations::uuid.eq(self.uuid)))
                 .execute(conn)
@@ -274,9 +262,10 @@ impl UserOrganization {

         json!({
             "Id": self.org_uuid,
+            "Identifier": null, // not supported by us
             "Name": org.name,
-            "Seats": 10,
-            "MaxCollections": 10,
+            "Seats": 10, // The value doesn't matter, we don't check server-side
+            "MaxCollections": 10, // The value doesn't matter, we don't check server-side
             "UsersGetPremium": true,

             "Use2fa": true,
@@ -285,8 +274,30 @@ impl UserOrganization {
             "UseGroups": false,
             "UseTotp": true,
             "UsePolicies": true,
-            "UseApi": false,
+            "UseApi": false, // not supported by us
             "SelfHost": true,
+            "SsoBound": false, // We do not support SSO
+            "UseSso": false, // We do not support SSO
+            // TODO: Add support for Business Portal
+            // Upstream is moving Policies and SSO management outside of the web-vault to /portal
+            // For now they still have that code also in the web-vault, but they will remove it at some point.
+            // https://github.com/bitwarden/server/tree/master/bitwarden_license/src/
+            "UseBusinessPortal": false, // Disable BusinessPortal Button
+
+            // TODO: Add support for Custom User Roles
+            // See: https://bitwarden.com/help/article/user-types-access-control/#custom-role
+            // "Permissions": {
+            //     "AccessBusinessPortal": false,
+            //     "AccessEventLogs": false,
+            //     "AccessImportExport": false,
+            //     "AccessReports": false,
+            //     "ManageAllCollections": false,
+            //     "ManageAssignedCollections": false,
+            //     "ManageGroups": false,
+            //     "ManagePolicies": false,
+            //     "ManageSso": false,
+            //     "ManageUsers": false
+            // },
+
+            "MaxStorageGb": 10, // The value doesn't matter, we don't check server-side

@@ -332,11 +343,13 @@ impl UserOrganization {
         let collections = CollectionUser::find_by_organization_and_user_uuid(&self.org_uuid, &self.user_uuid, conn);
         collections
             .iter()
-            .map(|c| json!({
-                "Id": c.collection_uuid,
-                "ReadOnly": c.read_only,
-                "HidePasswords": c.hide_passwords,
-            }))
+            .map(|c| {
+                json!({
+                    "Id": c.collection_uuid,
+                    "ReadOnly": c.read_only,
+                    "HidePasswords": c.hide_passwords,
+                })
+            })
             .collect()
     };

@@ -412,13 +425,26 @@ impl UserOrganization {
         Ok(())
     }

-    pub fn has_status(self, status: UserOrgStatus) -> bool {
+    pub fn find_by_email_and_org(email: &str, org_id: &str, conn: &DbConn) -> Option<UserOrganization> {
+        if let Some(user) = super::User::find_by_mail(email, conn) {
+            if let Some(user_org) = UserOrganization::find_by_user_and_org(&user.uuid, org_id, &conn) {
+                return Some(user_org);
+            }
+        }
+
+        None
+    }
+
+    pub fn has_status(&self, status: UserOrgStatus) -> bool {
         self.status == status as i32
     }

-    pub fn has_full_access(self) -> bool {
-        (self.access_all || self.atype >= UserOrgType::Admin) &&
-            self.has_status(UserOrgStatus::Confirmed)
+    pub fn has_type(&self, user_type: UserOrgType) -> bool {
+        self.atype == user_type as i32
+    }
+
+    pub fn has_full_access(&self) -> bool {
+        (self.access_all || self.atype >= UserOrgType::Admin) && self.has_status(UserOrgStatus::Confirmed)
    }

     pub fn find_by_uuid(uuid: &str, conn: &DbConn) -> Option<Self> {
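A hedged side-by-side sketch of the matches! rewrites above. Note the None handling: the i32 impls keep `| None` in the pattern to preserve the old fallthrough, while the UserOrgType impls drop it (partial_cmp returns None when the i32 is not a valid role value):

use std::cmp::Ordering;

fn gt_old(ord: Option<Ordering>) -> bool {
    match ord {
        Some(Ordering::Less) | Some(Ordering::Equal) => false,
        _ => true, // also true for None
    }
}

fn gt_new(ord: Option<Ordering>) -> bool {
    matches!(ord, Some(Ordering::Greater)) // false for None
}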
src/db/models/send.rs (new file, 284 lines)
@@ -0,0 +1,284 @@
use chrono::{NaiveDateTime, Utc};
use serde_json::Value;

use super::{Organization, User};

db_object! {
    #[derive(Identifiable, Queryable, Insertable, Associations, AsChangeset)]
    #[table_name = "sends"]
    #[changeset_options(treat_none_as_null="true")]
    #[belongs_to(User, foreign_key = "user_uuid")]
    #[belongs_to(Organization, foreign_key = "organization_uuid")]
    #[primary_key(uuid)]
    pub struct Send {
        pub uuid: String,

        pub user_uuid: Option<String>,
        pub organization_uuid: Option<String>,

        pub name: String,
        pub notes: Option<String>,

        pub atype: i32,
        pub data: String,
        pub akey: String,
        pub password_hash: Option<Vec<u8>>,
        password_salt: Option<Vec<u8>>,
        password_iter: Option<i32>,

        pub max_access_count: Option<i32>,
        pub access_count: i32,

        pub creation_date: NaiveDateTime,
        pub revision_date: NaiveDateTime,
        pub expiration_date: Option<NaiveDateTime>,
        pub deletion_date: NaiveDateTime,

        pub disabled: bool,
    }
}

#[derive(Copy, Clone, PartialEq, Eq, num_derive::FromPrimitive)]
pub enum SendType {
    Text = 0,
    File = 1,
}

impl Send {
    pub fn new(atype: i32, name: String, data: String, akey: String, deletion_date: NaiveDateTime) -> Self {
        let now = Utc::now().naive_utc();

        Self {
            uuid: crate::util::get_uuid(),
            user_uuid: None,
            organization_uuid: None,

            name,
            notes: None,

            atype,
            data,
            akey,
            password_hash: None,
            password_salt: None,
            password_iter: None,

            max_access_count: None,
            access_count: 0,

            creation_date: now,
            revision_date: now,
            expiration_date: None,
            deletion_date,

            disabled: false,
        }
    }

    pub fn set_password(&mut self, password: Option<&str>) {
        const PASSWORD_ITER: i32 = 100_000;

        if let Some(password) = password {
            self.password_iter = Some(PASSWORD_ITER);
            let salt = crate::crypto::get_random_64();
            let hash = crate::crypto::hash_password(password.as_bytes(), &salt, PASSWORD_ITER as u32);
            self.password_salt = Some(salt);
            self.password_hash = Some(hash);
        } else {
            self.password_iter = None;
            self.password_salt = None;
            self.password_hash = None;
        }
    }

    pub fn check_password(&self, password: &str) -> bool {
        match (&self.password_hash, &self.password_salt, self.password_iter) {
            (Some(hash), Some(salt), Some(iter)) => {
                crate::crypto::verify_password_hash(password.as_bytes(), salt, hash, iter as u32)
            }
            _ => false,
        }
    }

    pub fn to_json(&self) -> Value {
        use crate::util::format_date;
        use data_encoding::BASE64URL_NOPAD;
        use uuid::Uuid;

        let data: Value = serde_json::from_str(&self.data).unwrap_or_default();

        json!({
            "Id": self.uuid,
            "AccessId": BASE64URL_NOPAD.encode(Uuid::parse_str(&self.uuid).unwrap_or_default().as_bytes()),
            "Type": self.atype,

            "Name": self.name,
            "Notes": self.notes,
            "Text": if self.atype == SendType::Text as i32 { Some(&data) } else { None },
            "File": if self.atype == SendType::File as i32 { Some(&data) } else { None },

            "Key": self.akey,
            "MaxAccessCount": self.max_access_count,
            "AccessCount": self.access_count,
            "Password": self.password_hash.as_deref().map(|h| BASE64URL_NOPAD.encode(h)),
            "Disabled": self.disabled,

            "RevisionDate": format_date(&self.revision_date),
            "ExpirationDate": self.expiration_date.as_ref().map(format_date),
            "DeletionDate": format_date(&self.deletion_date),
            "Object": "send",
        })
    }

    pub fn to_json_access(&self) -> Value {
        use crate::util::format_date;

        let data: Value = serde_json::from_str(&self.data).unwrap_or_default();

        json!({
            "Id": self.uuid,
            "Type": self.atype,

            "Name": self.name,
            "Text": if self.atype == SendType::Text as i32 { Some(&data) } else { None },
            "File": if self.atype == SendType::File as i32 { Some(&data) } else { None },

            "ExpirationDate": self.expiration_date.as_ref().map(format_date),
            "Object": "send-access",
        })
    }
}

use crate::db::DbConn;

use crate::api::EmptyResult;
use crate::error::MapResult;

impl Send {
    pub fn save(&mut self, conn: &DbConn) -> EmptyResult {
        self.update_users_revision(conn);
        self.revision_date = Utc::now().naive_utc();

        db_run! { conn:
            sqlite, mysql {
                match diesel::replace_into(sends::table)
                    .values(SendDb::to_db(self))
                    .execute(conn)
                {
                    Ok(_) => Ok(()),
                    // Record already exists and causes a Foreign Key Violation because replace_into() wants to delete the record first.
                    Err(diesel::result::Error::DatabaseError(diesel::result::DatabaseErrorKind::ForeignKeyViolation, _)) => {
                        diesel::update(sends::table)
                            .filter(sends::uuid.eq(&self.uuid))
                            .set(SendDb::to_db(self))
                            .execute(conn)
                            .map_res("Error saving send")
                    }
                    Err(e) => Err(e.into()),
                }.map_res("Error saving send")
            }
            postgresql {
                let value = SendDb::to_db(self);
                diesel::insert_into(sends::table)
                    .values(&value)
                    .on_conflict(sends::uuid)
                    .do_update()
                    .set(&value)
                    .execute(conn)
                    .map_res("Error saving send")
            }
        }
    }

    pub fn delete(&self, conn: &DbConn) -> EmptyResult {
        self.update_users_revision(conn);

        if self.atype == SendType::File as i32 {
            std::fs::remove_dir_all(std::path::Path::new(&crate::CONFIG.sends_folder()).join(&self.uuid)).ok();
        }

        db_run! { conn: {
            diesel::delete(sends::table.filter(sends::uuid.eq(&self.uuid)))
                .execute(conn)
                .map_res("Error deleting send")
        }}
    }

    /// Purge all sends that are past their deletion date.
    pub fn purge(conn: &DbConn) {
        for send in Self::find_by_past_deletion_date(&conn) {
            send.delete(&conn).ok();
        }
    }

    pub fn update_users_revision(&self, conn: &DbConn) {
        match &self.user_uuid {
            Some(user_uuid) => {
                User::update_uuid_revision(&user_uuid, conn);
            }
            None => {
                // Belongs to Organization, not implemented
            }
        }
    }

    pub fn delete_all_by_user(user_uuid: &str, conn: &DbConn) -> EmptyResult {
        for send in Self::find_by_user(user_uuid, &conn) {
            send.delete(&conn)?;
        }
        Ok(())
    }

    pub fn find_by_access_id(access_id: &str, conn: &DbConn) -> Option<Self> {
        use data_encoding::BASE64URL_NOPAD;
        use uuid::Uuid;

        let uuid_vec = match BASE64URL_NOPAD.decode(access_id.as_bytes()) {
            Ok(v) => v,
            Err(_) => return None,
        };

        let uuid = match Uuid::from_slice(&uuid_vec) {
            Ok(u) => u.to_string(),
            Err(_) => return None,
        };

        Self::find_by_uuid(&uuid, conn)
    }

    pub fn find_by_uuid(uuid: &str, conn: &DbConn) -> Option<Self> {
        db_run! {conn: {
            sends::table
                .filter(sends::uuid.eq(uuid))
                .first::<SendDb>(conn)
                .ok()
                .from_db()
        }}
    }

    pub fn find_by_user(user_uuid: &str, conn: &DbConn) -> Vec<Self> {
        db_run! {conn: {
            sends::table
                .filter(sends::user_uuid.eq(user_uuid))
                .load::<SendDb>(conn).expect("Error loading sends").from_db()
        }}
    }

    pub fn find_by_org(org_uuid: &str, conn: &DbConn) -> Vec<Self> {
        db_run! {conn: {
            sends::table
                .filter(sends::organization_uuid.eq(org_uuid))
                .load::<SendDb>(conn).expect("Error loading sends").from_db()
        }}
    }

    pub fn find_by_past_deletion_date(conn: &DbConn) -> Vec<Self> {
        let now = Utc::now().naive_utc();
        db_run! {conn: {
            sends::table
                .filter(sends::deletion_date.lt(now))
                .load::<SendDb>(conn).expect("Error loading sends").from_db()
        }}
    }
}
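A hedged round-trip sketch of the AccessId scheme in send.rs above: a Send's UUID (16 raw bytes) is exposed as an unpadded base64url string, and find_by_access_id reverses it (data-encoding and uuid crates assumed):

use data_encoding::BASE64URL_NOPAD;
use uuid::Uuid;

fn to_access_id(uuid: &str) -> Option<String> {
    // Encode the raw UUID bytes, not the hyphenated string form.
    let uuid = Uuid::parse_str(uuid).ok()?;
    Some(BASE64URL_NOPAD.encode(uuid.as_bytes()))
}

fn from_access_id(access_id: &str) -> Option<String> {
    let bytes = BASE64URL_NOPAD.decode(access_id.as_bytes()).ok()?;
    Some(Uuid::from_slice(&bytes).ok()?.to_string())
}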
src/db/models/two_factor.rs
@@ -7,7 +7,7 @@ use crate::error::MapResult;
 use super::User;

 db_object! {
-    #[derive(Debug, Identifiable, Queryable, Insertable, Associations, AsChangeset)]
+    #[derive(Identifiable, Queryable, Insertable, Associations, AsChangeset)]
     #[table_name = "twofactor"]
     #[belongs_to(User, foreign_key = "user_uuid")]
     #[primary_key(uuid)]
src/db/models/user.rs
@@ -5,7 +5,7 @@ use crate::crypto;
 use crate::CONFIG;

 db_object! {
-    #[derive(Debug, Identifiable, Queryable, Insertable, AsChangeset)]
+    #[derive(Identifiable, Queryable, Insertable, AsChangeset)]
     #[table_name = "users"]
     #[changeset_options(treat_none_as_null="true")]
     #[primary_key(uuid)]
@@ -47,7 +47,7 @@ db_object! {
     }

-    #[derive(Debug, Identifiable, Queryable, Insertable)]
+    #[derive(Identifiable, Queryable, Insertable)]
     #[table_name = "invitations"]
     #[primary_key(email)]
     pub struct Invitation {
@@ -63,8 +63,8 @@ enum UserStatus {

 #[derive(Serialize, Deserialize)]
 pub struct UserStampException {
-    pub route: String,
-    pub security_stamp: String
+    pub route: String,
+    pub security_stamp: String,
 }

 /// Local methods
@@ -162,7 +162,7 @@ impl User {
     pub fn set_stamp_exception(&mut self, route_exception: &str) {
         let stamp_exception = UserStampException {
             route: route_exception.to_string(),
-            security_stamp: self.security_stamp.to_string()
+            security_stamp: self.security_stamp.to_string(),
         };
         self.stamp_exception = Some(serde_json::to_string(&stamp_exception).unwrap_or_default());
     }
@@ -177,7 +177,7 @@ impl User {
     }
 }

-use super::{Cipher, Device, Favorite, Folder, TwoFactor, UserOrgType, UserOrganization};
+use super::{Cipher, Device, Favorite, Folder, Send, TwoFactor, UserOrgType, UserOrganization};
 use crate::db::DbConn;

 use crate::api::EmptyResult;
@@ -263,6 +263,7 @@ impl User {
             }
         }

+        Send::delete_all_by_user(&self.uuid, conn)?;
         UserOrganization::delete_all_by_user(&self.uuid, conn)?;
         Cipher::delete_all_by_user(&self.uuid, conn)?;
         Favorite::delete_all_by_user(&self.uuid, conn)?;
@@ -340,14 +341,16 @@ impl User {
     pub fn last_active(&self, conn: &DbConn) -> Option<NaiveDateTime> {
         match Device::find_latest_active_by_user(&self.uuid, conn) {
             Some(device) => Some(device.updated_at),
-            None => None
+            None => None,
         }
     }
 }

 impl Invitation {
     pub const fn new(email: String) -> Self {
-        Self { email }
+        Self {
+            email,
+        }
     }

     pub fn save(&self, conn: &DbConn) -> EmptyResult {
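A hedged sketch of the stamp-exception storage shown above: the struct is serialized to a JSON string and stored on the user row, which is why the trailing-comma changes are purely stylistic (serde with the derive feature and serde_json assumed):

use serde::{Deserialize, Serialize};

#[derive(Serialize, Deserialize)]
pub struct UserStampException {
    pub route: String,
    pub security_stamp: String,
}

fn encode_exception(route: &str, stamp: &str) -> String {
    serde_json::to_string(&UserStampException {
        route: route.to_string(),
        security_stamp: stamp.to_string(),
    })
    .unwrap_or_default()
}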
src/db/schemas/mysql/schema.rs
@@ -102,6 +102,29 @@ table! {
     }
 }

+table! {
+    sends (uuid) {
+        uuid -> Text,
+        user_uuid -> Nullable<Text>,
+        organization_uuid -> Nullable<Text>,
+        name -> Text,
+        notes -> Nullable<Text>,
+        atype -> Integer,
+        data -> Text,
+        akey -> Text,
+        password_hash -> Nullable<Binary>,
+        password_salt -> Nullable<Binary>,
+        password_iter -> Nullable<Integer>,
+        max_access_count -> Nullable<Integer>,
+        access_count -> Integer,
+        creation_date -> Datetime,
+        revision_date -> Datetime,
+        expiration_date -> Nullable<Datetime>,
+        deletion_date -> Datetime,
+        disabled -> Bool,
+    }
+}
+
 table! {
     twofactor (uuid) {
         uuid -> Text,
@@ -176,6 +199,8 @@ joinable!(folders -> users (user_uuid));
 joinable!(folders_ciphers -> ciphers (cipher_uuid));
 joinable!(folders_ciphers -> folders (folder_uuid));
 joinable!(org_policies -> organizations (org_uuid));
+joinable!(sends -> organizations (organization_uuid));
+joinable!(sends -> users (user_uuid));
 joinable!(twofactor -> users (user_uuid));
 joinable!(users_collections -> collections (collection_uuid));
 joinable!(users_collections -> users (user_uuid));
@@ -193,6 +218,7 @@ allow_tables_to_appear_in_same_query!(
     invitations,
     org_policies,
     organizations,
+    sends,
     twofactor,
     users,
     users_collections,
src/db/schemas/postgresql/schema.rs
@@ -102,6 +102,29 @@ table! {
     }
 }

+table! {
+    sends (uuid) {
+        uuid -> Text,
+        user_uuid -> Nullable<Text>,
+        organization_uuid -> Nullable<Text>,
+        name -> Text,
+        notes -> Nullable<Text>,
+        atype -> Integer,
+        data -> Text,
+        akey -> Text,
+        password_hash -> Nullable<Binary>,
+        password_salt -> Nullable<Binary>,
+        password_iter -> Nullable<Integer>,
+        max_access_count -> Nullable<Integer>,
+        access_count -> Integer,
+        creation_date -> Timestamp,
+        revision_date -> Timestamp,
+        expiration_date -> Nullable<Timestamp>,
+        deletion_date -> Timestamp,
+        disabled -> Bool,
+    }
+}
+
 table! {
     twofactor (uuid) {
         uuid -> Text,
@@ -176,6 +199,8 @@ joinable!(folders -> users (user_uuid));
 joinable!(folders_ciphers -> ciphers (cipher_uuid));
 joinable!(folders_ciphers -> folders (folder_uuid));
 joinable!(org_policies -> organizations (org_uuid));
+joinable!(sends -> organizations (organization_uuid));
+joinable!(sends -> users (user_uuid));
 joinable!(twofactor -> users (user_uuid));
 joinable!(users_collections -> collections (collection_uuid));
 joinable!(users_collections -> users (user_uuid));
@@ -193,6 +218,7 @@ allow_tables_to_appear_in_same_query!(
     invitations,
     org_policies,
     organizations,
+    sends,
     twofactor,
     users,
     users_collections,
src/db/schemas/sqlite/schema.rs
@@ -102,6 +102,29 @@ table! {
     }
 }

+table! {
+    sends (uuid) {
+        uuid -> Text,
+        user_uuid -> Nullable<Text>,
+        organization_uuid -> Nullable<Text>,
+        name -> Text,
+        notes -> Nullable<Text>,
+        atype -> Integer,
+        data -> Text,
+        akey -> Text,
+        password_hash -> Nullable<Binary>,
+        password_salt -> Nullable<Binary>,
+        password_iter -> Nullable<Integer>,
+        max_access_count -> Nullable<Integer>,
+        access_count -> Integer,
+        creation_date -> Timestamp,
+        revision_date -> Timestamp,
+        expiration_date -> Nullable<Timestamp>,
+        deletion_date -> Timestamp,
+        disabled -> Bool,
+    }
+}
+
 table! {
     twofactor (uuid) {
         uuid -> Text,
@@ -176,6 +199,8 @@ joinable!(folders -> users (user_uuid));
 joinable!(folders_ciphers -> ciphers (cipher_uuid));
 joinable!(folders_ciphers -> folders (folder_uuid));
 joinable!(org_policies -> organizations (org_uuid));
+joinable!(sends -> organizations (organization_uuid));
+joinable!(sends -> users (user_uuid));
 joinable!(twofactor -> users (user_uuid));
 joinable!(users_collections -> collections (collection_uuid));
 joinable!(users_collections -> users (user_uuid));
@@ -193,6 +218,7 @@ allow_tables_to_appear_in_same_query!(
     invitations,
     org_policies,
     organizations,
+    sends,
     twofactor,
     users,
     users_collections,
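The three near-identical hunks above declare the same sends table once per database backend; only the datetime column type differs (Datetime on MySQL, Timestamp on PostgreSQL and SQLite; the file attributions here follow that mapping and are an inference). A trimmed sketch of the Diesel macro involved (diesel 1.x assumed):

#[macro_use]
extern crate diesel;

table! {
    sends (uuid) {
        uuid -> Text,
        // ...remaining columns identical across backends...
        creation_date -> Timestamp, // `Datetime` on the MySQL backend
    }
}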
src/error.rs (31 changed lines)
@@ -33,16 +33,16 @@ macro_rules! make_error {
     };
 }

+use diesel::r2d2::PoolError as R2d2Err;
 use diesel::result::Error as DieselErr;
 use diesel::ConnectionError as DieselConErr;
 use diesel_migrations::RunMigrationsError as DieselMigErr;
-use diesel::r2d2::PoolError as R2d2Err;
 use handlebars::RenderError as HbErr;
-use jsonwebtoken::errors::Error as JWTErr;
+use jsonwebtoken::errors::Error as JwtErr;
 use regex::Error as RegexErr;
 use reqwest::Error as ReqErr;
 use serde_json::{Error as SerdeErr, Value};
-use std::io::Error as IOErr;
+use std::io::Error as IoErr;

 use std::time::SystemTimeError as TimeErr;
 use u2f::u2ferror::U2fError as U2fErr;
@@ -72,10 +72,10 @@ make_error! {
     R2d2Error(R2d2Err): _has_source, _api_error,
     U2fError(U2fErr): _has_source, _api_error,
     SerdeError(SerdeErr): _has_source, _api_error,
-    JWTError(JWTErr): _has_source, _api_error,
+    JWtError(JwtErr): _has_source, _api_error,
     TemplError(HbErr): _has_source, _api_error,
     //WsError(ws::Error): _has_source, _api_error,
-    IOError(IOErr): _has_source, _api_error,
+    IoError(IoErr): _has_source, _api_error,
     TimeError(TimeErr): _has_source, _api_error,
     ReqError(ReqErr): _has_source, _api_error,
     RegexError(RegexErr): _has_source, _api_error,
@@ -152,6 +152,7 @@ impl<S> MapResult<S> for Option<S> {
     }
 }

+#[allow(clippy::unnecessary_wraps)]
 const fn _has_source<T>(e: T) -> Option<T> {
     Some(e)
 }
@@ -190,18 +191,14 @@ use rocket::response::{self, Responder, Response};
 impl<'r> Responder<'r> for Error {
     fn respond_to(self, _: &Request) -> response::Result<'r> {
         match self.error {
-            ErrorKind::EmptyError(_) => {} // Don't print the error in this situation
+            ErrorKind::EmptyError(_) => {}  // Don't print the error in this situation
             ErrorKind::SimpleError(_) => {} // Don't print the error in this situation
             _ => error!(target: "error", "{:#?}", self),
         };

         let code = Status::from_code(self.error_code).unwrap_or(Status::BadRequest);

-        Response::build()
-            .status(code)
-            .header(ContentType::JSON)
-            .sized_body(Cursor::new(format!("{}", self)))
-            .ok()
+        Response::build().status(code).header(ContentType::JSON).sized_body(Cursor::new(format!("{}", self))).ok()
     }
 }

@@ -220,6 +217,18 @@ macro_rules! err {
     }};
 }

+#[macro_export]
+macro_rules! err_code {
+    ($msg:expr, $err_code: literal) => {{
+        error!("{}", $msg);
+        return Err(crate::error::Error::new($msg, $msg).with_code($err_code));
+    }};
+    ($usr_msg:expr, $log_value:expr, $err_code: literal) => {{
+        error!("{}. {}", $usr_msg, $log_value);
+        return Err(crate::error::Error::new($usr_msg, $log_value).with_code($err_code));
+    }};
+}
+
 #[macro_export]
 macro_rules! err_discard {
     ($msg:expr, $data:expr) => {{
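A hypothetical usage sketch for the new err_code! macro above (handler name and message are illustrative): it works like err!, but attaches an explicit HTTP status code to the returned Error.

fn require_admin(is_admin: bool) -> Result<(), crate::error::Error> {
    if !is_admin {
        // Responds with 403 instead of the default 400 BadRequest.
        err_code!("You need to be an admin to do this", 403);
    }
    Ok(())
}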
94
src/mail.rs
@@ -1,4 +1,4 @@
|
||||
use std::{str::FromStr};
|
||||
use std::str::FromStr;
|
||||
|
||||
use chrono::{DateTime, Local};
|
||||
use percent_encoding::{percent_encode, NON_ALPHANUMERIC};
|
||||
@@ -30,10 +30,10 @@ fn mailer() -> SmtpTransport {
|
||||
let smtp_client = if CONFIG.smtp_ssl() {
|
||||
let mut tls_parameters = TlsParameters::builder(host);
|
||||
if CONFIG.smtp_accept_invalid_hostnames() {
|
||||
tls_parameters.dangerous_accept_invalid_hostnames(true);
|
||||
tls_parameters = tls_parameters.dangerous_accept_invalid_hostnames(true);
|
||||
}
|
||||
if CONFIG.smtp_accept_invalid_certs() {
|
||||
tls_parameters.dangerous_accept_invalid_certs(true);
|
||||
tls_parameters = tls_parameters.dangerous_accept_invalid_certs(true);
|
||||
}
|
||||
let tls_parameters = tls_parameters.build().unwrap();
|
||||
|
||||
@@ -58,15 +58,17 @@ fn mailer() -> SmtpTransport {
|
||||
|
||||
let smtp_client = match CONFIG.smtp_auth_mechanism() {
|
||||
Some(mechanism) => {
|
||||
let allowed_mechanisms = vec![SmtpAuthMechanism::Plain, SmtpAuthMechanism::Login, SmtpAuthMechanism::Xoauth2];
|
||||
let allowed_mechanisms = [SmtpAuthMechanism::Plain, SmtpAuthMechanism::Login, SmtpAuthMechanism::Xoauth2];
|
||||
let mut selected_mechanisms = vec![];
|
||||
for wanted_mechanism in mechanism.split(',') {
|
||||
for m in &allowed_mechanisms {
|
||||
if m.to_string().to_lowercase() == wanted_mechanism.trim_matches(|c| c == '"' || c == '\'' || c == ' ').to_lowercase() {
|
||||
if m.to_string().to_lowercase()
|
||||
== wanted_mechanism.trim_matches(|c| c == '"' || c == '\'' || c == ' ').to_lowercase()
|
||||
{
|
||||
selected_mechanisms.push(*m);
|
||||
}
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
if !selected_mechanisms.is_empty() {
|
||||
smtp_client.authentication(selected_mechanisms)
|
||||
@@ -115,7 +117,7 @@ pub fn send_password_hint(address: &str, hint: Option<String>) -> EmptyResult {
|
||||
|
||||
let (subject, body_html, body_text) = get_text(template_name, json!({ "hint": hint, "url": CONFIG.domain() }))?;
|
||||
|
||||
send_email(address, &subject, &body_html, &body_text)
|
||||
send_email(address, &subject, body_html, body_text)
|
||||
}
|
||||
|
||||
pub fn send_delete_account(address: &str, uuid: &str) -> EmptyResult {
|
||||
@@ -132,7 +134,7 @@ pub fn send_delete_account(address: &str, uuid: &str) -> EmptyResult {
|
||||
}),
|
||||
)?;
|
||||
|
||||
send_email(address, &subject, &body_html, &body_text)
|
||||
send_email(address, &subject, body_html, body_text)
|
||||
}
|
||||
|
||||
pub fn send_verify_email(address: &str, uuid: &str) -> EmptyResult {
|
||||
@@ -149,7 +151,7 @@ pub fn send_verify_email(address: &str, uuid: &str) -> EmptyResult {
|
||||
}),
|
||||
)?;
|
||||
|
||||
send_email(address, &subject, &body_html, &body_text)
|
||||
send_email(address, &subject, body_html, body_text)
|
||||
}
|
||||
|
||||
pub fn send_welcome(address: &str) -> EmptyResult {
|
||||
@@ -160,7 +162,7 @@ pub fn send_welcome(address: &str) -> EmptyResult {
|
||||
}),
|
||||
)?;
|
||||
|
||||
send_email(address, &subject, &body_html, &body_text)
|
||||
send_email(address, &subject, body_html, body_text)
|
||||
}
|
||||
|
||||
pub fn send_welcome_must_verify(address: &str, uuid: &str) -> EmptyResult {
|
||||
@@ -176,7 +178,7 @@ pub fn send_welcome_must_verify(address: &str, uuid: &str) -> EmptyResult {
|
||||
}),
|
||||
)?;
|
||||
|
||||
send_email(address, &subject, &body_html, &body_text)
|
||||
send_email(address, &subject, body_html, body_text)
|
||||
}
|
||||
|
||||
pub fn send_invite(
|
||||
@@ -200,15 +202,15 @@ pub fn send_invite(
|
||||
"email/send_org_invite",
|
||||
json!({
|
||||
"url": CONFIG.domain(),
|
||||
"org_id": org_id.unwrap_or_else(|| "_".to_string()),
|
||||
"org_user_id": org_user_id.unwrap_or_else(|| "_".to_string()),
|
||||
"org_id": org_id.as_deref().unwrap_or("_"),
|
||||
"org_user_id": org_user_id.as_deref().unwrap_or("_"),
|
||||
"email": percent_encode(address.as_bytes(), NON_ALPHANUMERIC).to_string(),
|
||||
"org_name": org_name,
|
||||
"token": invite_token,
|
||||
}),
|
||||
)?;
|
||||
|
||||
send_email(address, &subject, &body_html, &body_text)
|
||||
send_email(address, &subject, body_html, body_text)
|
||||
}
|
||||
|
||||
pub fn send_invite_accepted(new_user_email: &str, address: &str, org_name: &str) -> EmptyResult {
|
||||
@@ -221,7 +223,7 @@ pub fn send_invite_accepted(new_user_email: &str, address: &str, org_name: &str)
|
||||
}),
|
||||
)?;
|
||||
|
||||
send_email(address, &subject, &body_html, &body_text)
|
||||
send_email(address, &subject, body_html, body_text)
|
||||
}
|
||||
|
||||
pub fn send_invite_confirmed(address: &str, org_name: &str) -> EmptyResult {
|
||||
@@ -233,7 +235,7 @@ pub fn send_invite_confirmed(address: &str, org_name: &str) -> EmptyResult {
|
||||
}),
|
||||
)?;
|
||||
|
||||
send_email(address, &subject, &body_html, &body_text)
|
||||
send_email(address, &subject, body_html, body_text)
|
||||
}
|
||||
|
||||
pub fn send_new_device_logged_in(address: &str, ip: &str, dt: &DateTime<Local>, device: &str) -> EmptyResult {
|
||||
@@ -251,7 +253,7 @@ pub fn send_new_device_logged_in(address: &str, ip: &str, dt: &DateTime<Local>,
|
||||
}),
|
||||
)?;
|
||||
|
||||
send_email(address, &subject, &body_html, &body_text)
|
||||
send_email(address, &subject, body_html, body_text)
|
||||
}
|
||||
|
||||
pub fn send_token(address: &str, token: &str) -> EmptyResult {
|
||||
@@ -263,7 +265,7 @@ pub fn send_token(address: &str, token: &str) -> EmptyResult {
|
||||
}),
|
||||
)?;
|
||||
|
||||
send_email(address, &subject, &body_html, &body_text)
|
||||
send_email(address, &subject, body_html, body_text)
|
||||
}
|
||||
|
||||
pub fn send_change_email(address: &str, token: &str) -> EmptyResult {
@@ -275,7 +277,7 @@ pub fn send_change_email(address: &str, token: &str) -> EmptyResult {
}),
)?;

send_email(address, &subject, &body_html, &body_text)
send_email(address, &subject, body_html, body_text)
}

pub fn send_test(address: &str) -> EmptyResult {
@@ -286,10 +288,10 @@ pub fn send_test(address: &str) -> EmptyResult {
}),
)?;

send_email(address, &subject, &body_html, &body_text)
send_email(address, &subject, body_html, body_text)
}

fn send_email(address: &str, subject: &str, body_html: &str, body_text: &str) -> EmptyResult {
fn send_email(address: &str, subject: &str, body_html: String, body_text: String) -> EmptyResult {
let address_split: Vec<&str> = address.rsplitn(2, '@').collect();
if address_split.len() != 2 {
err!("Invalid email address (no @)");
@@ -302,49 +304,41 @@ fn send_email(address: &str, subject: &str, body_html: &str, body_text: &str) ->

let address = format!("{}@{}", address_split[1], domain_puny);

let html = SinglePart::base64()
let html = SinglePart::builder()
// We force Base64 encoding because in the past we had issues with different encodings.
.header(header::ContentTransferEncoding::Base64)
.header(header::ContentType("text/html; charset=utf-8".parse()?))
.body(body_html);

let text = SinglePart::base64()
let text = SinglePart::builder()
// We force Base64 encoding because in the past we had issues with different encodings.
.header(header::ContentTransferEncoding::Base64)
.header(header::ContentType("text/plain; charset=utf-8".parse()?))
.body(body_text);

// The boundary generated by Lettre itself is usually too long per RFC 822, so we generate our own.
use uuid::Uuid;
let unique_id = Uuid::new_v4().to_simple();
let boundary = format!("_Part_{}_", unique_id);
let alternative = MultiPart::alternative().boundary(boundary).singlepart(text).singlepart(html);
let smtp_from = &CONFIG.smtp_from();

let email = Message::builder()
.message_id(Some(format!("<{}.{}>", unique_id, smtp_from)))
.message_id(Some(format!("<{}@{}>", crate::util::get_uuid(), smtp_from.split('@').collect::<Vec<&str>>()[1])))
.to(Mailbox::new(None, Address::from_str(&address)?))
.from(Mailbox::new(
Some(CONFIG.smtp_from_name()),
Address::from_str(smtp_from)?,
))
.from(Mailbox::new(Some(CONFIG.smtp_from_name()), Address::from_str(smtp_from)?))
.subject(subject)
.multipart(alternative)?;
.multipart(MultiPart::alternative().singlepart(text).singlepart(html))?;

match mailer().send(&email) {
Ok(_) => Ok(()),
// Match some common errors and make them more user-friendly
Err(e) => match e {
lettre::transport::smtp::Error::Client(x) => {
err!(format!("SMTP Client error: {}", x));
},
lettre::transport::smtp::Error::Transient(x) => {
err!(format!("SMTP 4xx error: {:?}", x.message));
},
lettre::transport::smtp::Error::Permanent(x) => {
err!(format!("SMTP 5xx error: {:?}", x.message));
},
lettre::transport::smtp::Error::Io(x) => {
err!(format!("SMTP IO error: {}", x));
},
// Fallback for all other errors
_ => Err(e.into())
Err(e) => {
if e.is_client() {
err!(format!("SMTP Client error: {}", e));
} else if e.is_transient() {
err!(format!("SMTP 4xx error: {:?}", e));
} else if e.is_permanent() {
err!(format!("SMTP 5xx error: {:?}", e));
} else if e.is_timeout() {
err!(format!("SMTP timeout error: {:?}", e));
} else {
Err(e.into())
}
}
}
}
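The error handling above moves from matching lettre's (no longer public) error enum variants to the boolean classification helpers the newer lettre API exposes. A minimal sketch of that pattern in isolation, assuming lettre 0.10-style helpers; the classify function itself is illustrative, not part of Vaultwarden:

    // Illustrative: turn an SMTP transport error into a user-friendly message
    // using the is_*() helpers (the same calls the diff above relies on).
    fn classify(e: &lettre::transport::smtp::Error) -> String {
        if e.is_client() {
            format!("SMTP Client error: {}", e) // the request we sent was rejected as malformed
        } else if e.is_transient() {
            format!("SMTP 4xx error: {:?}", e) // temporary failure; a retry may succeed
        } else if e.is_permanent() {
            format!("SMTP 5xx error: {:?}", e) // permanent rejection by the server
        } else if e.is_timeout() {
            format!("SMTP timeout error: {:?}", e)
        } else {
            format!("SMTP error: {:?}", e) // fallback for anything else
        }
    }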
141
src/main.rs
@@ -1,12 +1,12 @@
#![forbid(unsafe_code)]
#![cfg_attr(feature = "unstable", feature(ip))]
#![recursion_limit = "256"]
#![recursion_limit = "512"]

extern crate openssl;
#[macro_use]
extern crate rocket;
#[macro_use]
extern crate serde_derive;
extern crate serde;
#[macro_use]
extern crate serde_json;
#[macro_use]
@@ -16,6 +16,7 @@ extern crate diesel
#[macro_use]
extern crate diesel_migrations;

use job_scheduler::{Job, JobScheduler};
use std::{
fs::create_dir_all,
panic,
@@ -23,10 +24,9 @@ use std::{
process::{exit, Command},
str::FromStr,
thread,
time::Duration,
};

use structopt::StructOpt;

#[macro_use]
mod error;
mod api;
@@ -40,14 +40,7 @@ mod util;

pub use config::CONFIG;
pub use error::{Error, MapResult};

#[derive(Debug, StructOpt)]
#[structopt(name = "bitwarden_rs", about = "A Bitwarden API server written in Rust")]
struct Opt {
/// Prints the app version
#[structopt(short, long)]
version: bool,
}
pub use util::is_running_in_docker;

fn main() {
parse_args();
@@ -57,34 +50,47 @@ fn main() {
let level = LF::from_str(&CONFIG.log_level()).expect("Valid log level");
init_logging(level).ok();

let extra_debug = match level {
LF::Trace | LF::Debug => true,
_ => false,
};
let extra_debug = matches!(level, LF::Trace | LF::Debug);

check_data_folder();
check_rsa_keys();
check_web_vault();

create_icon_cache_folder();

launch_rocket(extra_debug);
let pool = create_db_pool();
schedule_jobs(pool.clone());
launch_rocket(pool, extra_debug); // Blocks until program termination.
}
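The `matches!` rewrite in main() is a pure readability change: the standard-library macro expands to exactly the four-line match it replaces. A minimal sketch of the equivalence (the function name is illustrative):

    // matches! returns true when the value fits one of the listed patterns,
    // false otherwise -- the same as the explicit match removed above.
    fn is_extra_debug(level: log::LevelFilter) -> bool {
        matches!(level, log::LevelFilter::Trace | log::LevelFilter::Debug)
    }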

const HELP: &str = "\
Alternative implementation of the Bitwarden server API written in Rust

USAGE:
vaultwarden

FLAGS:
-h, --help Prints help information
-v, --version Prints the app version
";

fn parse_args() {
let opt = Opt::from_args();
if opt.version {
if let Some(version) = option_env!("BWRS_VERSION") {
println!("bitwarden_rs {}", version);
} else {
println!("bitwarden_rs (Version info from Git not present)");
}
const NO_VERSION: &str = "(Version info from Git not present)";
let mut pargs = pico_args::Arguments::from_env();

if pargs.contains(["-h", "--help"]) {
println!("vaultwarden {}", option_env!("BWRS_VERSION").unwrap_or(NO_VERSION));
print!("{}", HELP);
exit(0);
} else if pargs.contains(["-v", "--version"]) {
println!("vaultwarden {}", option_env!("BWRS_VERSION").unwrap_or(NO_VERSION));
exit(0);
}
}

fn launch_info() {
println!("/--------------------------------------------------------------------\\");
println!("| Starting Bitwarden_RS |");
println!("| Starting Vaultwarden |");

if let Some(version) = option_env!("BWRS_VERSION") {
println!("|{:^68}|", format!("Version {}", version));
@@ -96,7 +102,7 @@ fn launch_info() {
println!("| Send usage/configuration questions or feature requests to: |");
println!("| https://bitwardenrs.discourse.group/ |");
println!("| Report suspected bugs/issues in the software itself at: |");
println!("| https://github.com/dani-garcia/bitwarden_rs/issues/new |");
println!("| https://github.com/dani-garcia/vaultwarden/issues/new |");
println!("\\--------------------------------------------------------------------/\n");
}

@@ -121,7 +127,9 @@ fn init_logging(level: log::LevelFilter) -> Result<(), fern::InitError> {
// Enable smtp debug logging only specifically for smtp, when needed.
// This can contain sensitive information we do not want in the default debug/trace logging.
if CONFIG.smtp_debug() {
println!("[WARNING] SMTP Debugging is enabled (SMTP_DEBUG=true). Sensitive information could be disclosed via logs!");
println!(
"[WARNING] SMTP Debugging is enabled (SMTP_DEBUG=true). Sensitive information could be disclosed via logs!"
);
println!("[WARNING] Only enable SMTP_DEBUG during troubleshooting!\n");
logger = logger.level_for("lettre::transport::smtp", log::LevelFilter::Debug)
} else {
@@ -199,7 +207,7 @@ fn chain_syslog(logger: fern::Dispatch) -> fern::Dispatch {
let syslog_fmt = syslog::Formatter3164 {
facility: syslog::Facility::LOG_USER,
hostname: None,
process: "bitwarden_rs".into(),
process: "vaultwarden".into(),
pid: 0,
};

@@ -212,9 +220,28 @@ fn chain_syslog(logger: fern::Dispatch) -> fern::Dispatch {
}
}

fn create_dir(path: &str, description: &str) {
// Try to create the specified dir, if it doesn't already exist.
let err_msg = format!("Error creating {} directory '{}'", description, path);
create_dir_all(path).expect(&err_msg);
}

fn create_icon_cache_folder() {
// Try to create the icon cache folder, and generate an error if it could not.
create_dir_all(&CONFIG.icon_cache_folder()).expect("Error creating icon cache directory");
create_dir(&CONFIG.icon_cache_folder(), "icon cache");
}

fn check_data_folder() {
let data_folder = &CONFIG.data_folder();
let path = Path::new(data_folder);
if !path.exists() {
error!("Data folder '{}' doesn't exist.", data_folder);
if is_running_in_docker() {
error!("Verify that your data volume is mounted at the correct location.");
} else {
error!("Create the data folder and try again.");
}
exit(1);
}
}

fn check_rsa_keys() {
@@ -273,22 +300,27 @@ fn check_web_vault() {
let index_path = Path::new(&CONFIG.web_vault_folder()).join("index.html");

if !index_path.exists() {
error!("Web vault is not found at '{}'. To install it, please follow the steps in: ", CONFIG.web_vault_folder());
error!("https://github.com/dani-garcia/bitwarden_rs/wiki/Building-binary#install-the-web-vault");
error!(
"Web vault is not found at '{}'. To install it, please follow the steps in: ",
CONFIG.web_vault_folder()
);
error!("https://github.com/dani-garcia/vaultwarden/wiki/Building-binary#install-the-web-vault");
error!("You can also set the environment variable 'WEB_VAULT_ENABLED=false' to disable it");
exit(1);
}
}

fn launch_rocket(extra_debug: bool) {
let pool = match util::retry_db(db::DbPool::from_config, CONFIG.db_connection_retries()) {
fn create_db_pool() -> db::DbPool {
match util::retry_db(db::DbPool::from_config, CONFIG.db_connection_retries()) {
Ok(p) => p,
Err(e) => {
error!("Error creating database pool: {:?}", e);
exit(1);
}
};
}
}
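create_db_pool() above relies on util::retry_db to survive a database that comes up more slowly than the server (common with Docker Compose setups). The real helper lives in src/util.rs; this is a hedged sketch of the general pattern, where the body is an assumption and only the name and call shape mirror the diff:

    // Illustrative retry loop: call a fallible constructor up to `retries`
    // times, sleeping briefly between attempts before giving up.
    fn retry_db<F, T, E>(mut make: F, retries: u32) -> Result<T, E>
    where
        F: FnMut() -> Result<T, E>,
        E: std::fmt::Debug,
    {
        let mut attempt = 0;
        loop {
            match make() {
                Ok(v) => return Ok(v),
                Err(e) if attempt < retries => {
                    attempt += 1;
                    eprintln!("DB connection attempt {} failed: {:?}; retrying", attempt, e);
                    std::thread::sleep(std::time::Duration::from_secs(1));
                }
                Err(e) => return Err(e),
            }
        }
    }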

fn launch_rocket(pool: db::DbPool, extra_debug: bool) {
let basepath = &CONFIG.domain_path();

// If adding more paths here, consider also adding them to
@@ -303,7 +335,7 @@ fn launch_rocket(extra_debug: bool) {
.manage(pool)
.manage(api::start_notification_server())
.attach(util::AppHeaders())
.attach(util::CORS())
.attach(util::Cors())
.attach(util::BetterLogging(extra_debug))
.launch();

@@ -311,3 +343,40 @@ fn launch_rocket(extra_debug: bool) {
// The launch will restore the original logging level
error!("Launch error {:#?}", result);
}

fn schedule_jobs(pool: db::DbPool) {
if CONFIG.job_poll_interval_ms() == 0 {
info!("Job scheduler disabled.");
return;
}
thread::Builder::new()
.name("job-scheduler".to_string())
.spawn(move || {
let mut sched = JobScheduler::new();

// Purge sends that are past their deletion date.
if !CONFIG.send_purge_schedule().is_empty() {
sched.add(Job::new(CONFIG.send_purge_schedule().parse().unwrap(), || {
api::purge_sends(pool.clone());
}));
}

// Purge trashed items that are old enough to be auto-deleted.
if !CONFIG.trash_purge_schedule().is_empty() {
sched.add(Job::new(CONFIG.trash_purge_schedule().parse().unwrap(), || {
api::purge_trashed_ciphers(pool.clone());
}));
}

// Periodically check for jobs to run. We probably won't need any
// jobs that run more often than once a minute, so a default poll
// interval of 30 seconds should be sufficient. Users who want to
// schedule jobs to run more frequently for some reason can reduce
// the poll interval accordingly.
loop {
sched.tick();
thread::sleep(Duration::from_millis(CONFIG.job_poll_interval_ms()));
}
})
.expect("Error spawning job scheduler thread");
}
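schedule_jobs() keeps the scheduler off the HTTP path by running it on its own named thread and polling it at JOB_POLL_INTERVAL_MS. A minimal standalone sketch of the same job_scheduler usage; the cron expression and println are illustrative stand-ins for the purge jobs:

    use job_scheduler::{Job, JobScheduler};
    use std::{thread, time::Duration};

    fn run_scheduler_sketch() {
        let mut sched = JobScheduler::new();
        // The job_scheduler crate uses a seconds-resolution cron syntax;
        // "0 5 * * * *" fires at minute 5 of every hour.
        sched.add(Job::new("0 5 * * * *".parse().unwrap(), || {
            println!("purging expired items"); // stand-in for api::purge_sends(pool.clone())
        }));
        loop {
            sched.tick(); // runs any jobs whose schedule has come due
            thread::sleep(Duration::from_millis(30_000)); // default 30s poll interval
        }
    }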
@@ -508,7 +508,8 @@
"disneymoviesanywhere.com",
"go.com",
"disney.com",
"dadt.com"
"dadt.com",
"disneyplus.com"
],
"Excluded": false
},
@@ -771,7 +772,8 @@
"stackoverflow.com",
"serverfault.com",
"mathoverflow.net",
"askubuntu.com"
"askubuntu.com",
"stackapps.com"
],
"Excluded": false
},
@@ -885,5 +887,21 @@
"yandex.uz"
],
"Excluded": false
},
{
"Type": 84,
"Domains": [
"sonyentertainmentnetwork.com",
"sony.com"
],
"Excluded": false
},
{
"Type": 85,
"Domains": [
"protonmail.com",
"protonvpn.com"
],
"Excluded": false
}
]
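Each entry in this equivalent-domains list shares one shape: a numeric Type, a list of Domains treated as interchangeable for autofill, and an Excluded flag. A hedged sketch of how such an entry could be modeled with serde; the struct is illustrative, not Vaultwarden's own type:

    use serde::Deserialize;

    // Field names map to the JSON keys via rename; with this in place,
    // serde_json::from_str::<Vec<GlobalDomain>>(...) can parse the whole file.
    #[derive(Deserialize)]
    struct GlobalDomain {
        #[serde(rename = "Type")]
        kind: i32,
        #[serde(rename = "Domains")]
        domains: Vec<String>,
        #[serde(rename = "Excluded")]
        excluded: bool,
    }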
BIN
src/static/images/fallback-icon.png
Normal file
[Image diff: new fallback-icon.png added (331 B); four existing PNGs re-optimized (9.7 KiB → 9.2 KiB, 5.8 KiB → 5.3 KiB, 1.3 KiB → 1.3 KiB, 1.9 KiB → 1.8 KiB); the other filenames are not shown in this view.]
55
src/static/scripts/bootstrap-native.js
vendored
@@ -1,6 +1,6 @@
/*!
* Native JavaScript for Bootstrap v3.0.10 (https://thednp.github.io/bootstrap.native/)
* Copyright 2015-2020 © dnp_theme
* Native JavaScript for Bootstrap v3.0.15 (https://thednp.github.io/bootstrap.native/)
* Copyright 2015-2021 © dnp_theme
* Licensed under MIT (https://github.com/thednp/bootstrap.native/blob/master/LICENSE)
*/
(function (global, factory) {
@@ -15,10 +15,14 @@

var transitionDuration = 'webkitTransition' in document.head.style ? 'webkitTransitionDuration' : 'transitionDuration';

var transitionProperty = 'webkitTransition' in document.head.style ? 'webkitTransitionProperty' : 'transitionProperty';

function getElementTransitionDuration(element) {
var duration = supportTransition ? parseFloat(getComputedStyle(element)[transitionDuration]) : 0;
duration = typeof duration === 'number' && !isNaN(duration) ? duration * 1000 : 0;
return duration;
var computedStyle = getComputedStyle(element),
property = computedStyle[transitionProperty],
duration = supportTransition && property && property !== 'none'
? parseFloat(computedStyle[transitionDuration]) : 0;
return !isNaN(duration) ? duration * 1000 : 0;
}

function emulateTransitionEnd(element,handler){
@@ -35,9 +39,15 @@
return selector instanceof Element ? selector : lookUp.querySelector(selector);
}

function bootstrapCustomEvent(eventName, componentName, related) {
function bootstrapCustomEvent(eventName, componentName, eventProperties) {
var OriginalCustomEvent = new CustomEvent( eventName + '.bs.' + componentName, {cancelable: true});
OriginalCustomEvent.relatedTarget = related;
if (typeof eventProperties !== 'undefined') {
Object.keys(eventProperties).forEach(function (key) {
Object.defineProperty(OriginalCustomEvent, key, {
value: eventProperties[key]
});
});
}
return OriginalCustomEvent;
}

@@ -352,7 +362,7 @@
};
self.slideTo = function (next) {
if (vars.isSliding) { return; }
var activeItem = self.getActiveIndex(), orientation;
var activeItem = self.getActiveIndex(), orientation, eventProperties;
if ( activeItem === next ) {
return;
} else if ( (activeItem < next ) || (activeItem === 0 && next === slides.length -1 ) ) {
@@ -363,8 +373,9 @@
if ( next < 0 ) { next = slides.length - 1; }
else if ( next >= slides.length ){ next = 0; }
orientation = vars.direction === 'left' ? 'next' : 'prev';
slideCustomEvent = bootstrapCustomEvent('slide', 'carousel', slides[next]);
slidCustomEvent = bootstrapCustomEvent('slid', 'carousel', slides[next]);
eventProperties = { relatedTarget: slides[next], direction: vars.direction, from: activeItem, to: next };
slideCustomEvent = bootstrapCustomEvent('slide', 'carousel', eventProperties);
slidCustomEvent = bootstrapCustomEvent('slid', 'carousel', eventProperties);
dispatchCustomEvent.call(element, slideCustomEvent);
if (slideCustomEvent.defaultPrevented) { return; }
vars.index = next;
@@ -615,7 +626,7 @@
}
}
self.show = function () {
showCustomEvent = bootstrapCustomEvent('show', 'dropdown', relatedTarget);
showCustomEvent = bootstrapCustomEvent('show', 'dropdown', { relatedTarget: relatedTarget });
dispatchCustomEvent.call(parent, showCustomEvent);
if ( showCustomEvent.defaultPrevented ) { return; }
menu.classList.add('show');
@@ -626,12 +637,12 @@
setTimeout(function () {
setFocus( menu.getElementsByTagName('INPUT')[0] || element );
toggleDismiss();
shownCustomEvent = bootstrapCustomEvent( 'shown', 'dropdown', relatedTarget);
shownCustomEvent = bootstrapCustomEvent('shown', 'dropdown', { relatedTarget: relatedTarget });
dispatchCustomEvent.call(parent, shownCustomEvent);
},1);
};
self.hide = function () {
hideCustomEvent = bootstrapCustomEvent('hide', 'dropdown', relatedTarget);
hideCustomEvent = bootstrapCustomEvent('hide', 'dropdown', { relatedTarget: relatedTarget });
dispatchCustomEvent.call(parent, hideCustomEvent);
if ( hideCustomEvent.defaultPrevented ) { return; }
menu.classList.remove('show');
@@ -643,7 +654,7 @@
setTimeout(function () {
element.Dropdown && element.addEventListener('click',clickHandler,false);
},1);
hiddenCustomEvent = bootstrapCustomEvent('hidden', 'dropdown', relatedTarget);
hiddenCustomEvent = bootstrapCustomEvent('hidden', 'dropdown', { relatedTarget: relatedTarget });
dispatchCustomEvent.call(parent, hiddenCustomEvent);
};
self.toggle = function () {
@@ -749,7 +760,7 @@
setFocus(modal);
modal.isAnimating = false;
toggleEvents(1);
shownCustomEvent = bootstrapCustomEvent('shown', 'modal', relatedTarget);
shownCustomEvent = bootstrapCustomEvent('shown', 'modal', { relatedTarget: relatedTarget });
dispatchCustomEvent.call(modal, shownCustomEvent);
}
function triggerHide(force) {
@@ -804,7 +815,7 @@
};
self.show = function () {
if (modal.classList.contains('show') && !!modal.isAnimating ) {return}
showCustomEvent = bootstrapCustomEvent('show', 'modal', relatedTarget);
showCustomEvent = bootstrapCustomEvent('show', 'modal', { relatedTarget: relatedTarget });
dispatchCustomEvent.call(modal, showCustomEvent);
if ( showCustomEvent.defaultPrevented ) { return; }
modal.isAnimating = true;
@@ -1193,7 +1204,7 @@
if (dropLink && !dropLink.classList.contains('active') ) {
dropLink.classList.add('active');
}
dispatchCustomEvent.call(element, bootstrapCustomEvent( 'activate', 'scrollspy', vars.items[index]));
dispatchCustomEvent.call(element, bootstrapCustomEvent( 'activate', 'scrollspy', { relatedTarget: vars.items[index] }));
} else if ( isActive && !inside ) {
item.classList.remove('active');
if (dropLink && dropLink.classList.contains('active') && !item.parentNode.getElementsByClassName('active').length ) {
@@ -1278,7 +1289,7 @@
} else {
tabs.isAnimating = false;
}
shownCustomEvent = bootstrapCustomEvent('shown', 'tab', activeTab);
shownCustomEvent = bootstrapCustomEvent('shown', 'tab', { relatedTarget: activeTab });
dispatchCustomEvent.call(next, shownCustomEvent);
}
function triggerHide() {
@@ -1287,8 +1298,8 @@
nextContent.style.float = 'left';
containerHeight = activeContent.scrollHeight;
}
showCustomEvent = bootstrapCustomEvent('show', 'tab', activeTab);
hiddenCustomEvent = bootstrapCustomEvent('hidden', 'tab', next);
showCustomEvent = bootstrapCustomEvent('show', 'tab', { relatedTarget: activeTab });
hiddenCustomEvent = bootstrapCustomEvent('hidden', 'tab', { relatedTarget: next });
dispatchCustomEvent.call(next, showCustomEvent);
if ( showCustomEvent.defaultPrevented ) { return; }
nextContent.classList.add('active');
@@ -1331,7 +1342,7 @@
nextContent = queryElement(next.getAttribute('href'));
activeTab = getActiveTab();
activeContent = getActiveContent();
hideCustomEvent = bootstrapCustomEvent( 'hide', 'tab', next);
hideCustomEvent = bootstrapCustomEvent( 'hide', 'tab', { relatedTarget: next });
dispatchCustomEvent.call(activeTab, hideCustomEvent);
if (hideCustomEvent.defaultPrevented) { return; }
tabs.isAnimating = true;
@@ -1637,7 +1648,7 @@
}
}

var version = "3.0.10";
var version = "3.0.15";

var index = {
Alert: Alert,
11
src/static/scripts/bootstrap.css
vendored
@@ -1,10 +1,10 @@
/*!
* Bootstrap v4.5.2 (https://getbootstrap.com/)
* Bootstrap v4.5.3 (https://getbootstrap.com/)
* Copyright 2011-2020 The Bootstrap Authors
* Copyright 2011-2020 Twitter, Inc.
* Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)
*/
:root {
:root {
--blue: #007bff;
--indigo: #6610f2;
--purple: #6f42c1;
@@ -216,6 +216,7 @@ caption {

th {
text-align: inherit;
text-align: -webkit-match-parent;
}

label {
@@ -3750,6 +3751,8 @@ input[type="button"].btn-block {
display: block;
min-height: 1.5rem;
padding-left: 1.5rem;
-webkit-print-color-adjust: exact;
color-adjust: exact;
}

.custom-control-inline {
@@ -5289,6 +5292,7 @@ a.badge-dark:focus, a.badge-dark.focus {
position: absolute;
top: 0;
right: 0;
z-index: 2;
padding: 0.75rem 1.25rem;
color: inherit;
}
@@ -10163,7 +10167,7 @@ a.text-dark:hover, a.text-dark:focus {

.text-break {
word-break: break-word !important;
overflow-wrap: break-word !important;
word-wrap: break-word !important;
}

.text-reset {
@@ -10256,3 +10260,4 @@ a.text-dark:hover, a.text-dark:focus {
border-color: #dee2e6;
}
}
/*# sourceMappingURL=bootstrap.css.map */
19
src/static/scripts/datatables.css
vendored
@@ -4,12 +4,13 @@
*
* To rebuild or modify this file with the latest versions of the included
* software please visit:
* https://datatables.net/download/#bs4/dt-1.10.22
* https://datatables.net/download/#bs4/dt-1.10.23
*
* Included libraries:
* DataTables 1.10.22
* DataTables 1.10.23
*/

@charset "UTF-8";
table.dataTable {
clear: both;
margin-top: 6px !important;
@@ -114,7 +115,7 @@ table.dataTable > thead .sorting_desc:before,
table.dataTable > thead .sorting_asc_disabled:before,
table.dataTable > thead .sorting_desc_disabled:before {
right: 1em;
content: "\2191";
content: "↑";
}
table.dataTable > thead .sorting:after,
table.dataTable > thead .sorting_asc:after,
@@ -122,7 +123,7 @@ table.dataTable > thead .sorting_desc:after,
table.dataTable > thead .sorting_asc_disabled:after,
table.dataTable > thead .sorting_desc_disabled:after {
right: 0.5em;
content: "\2193";
content: "↓";
}
table.dataTable > thead .sorting_asc:before,
table.dataTable > thead .sorting_desc:after {
@@ -165,9 +166,9 @@ div.dataTables_scrollFoot > .dataTables_scrollFootInner > table {

@media screen and (max-width: 767px) {
div.dataTables_wrapper div.dataTables_length,
div.dataTables_wrapper div.dataTables_filter,
div.dataTables_wrapper div.dataTables_info,
div.dataTables_wrapper div.dataTables_paginate {
div.dataTables_wrapper div.dataTables_filter,
div.dataTables_wrapper div.dataTables_info,
div.dataTables_wrapper div.dataTables_paginate {
text-align: center;
}
div.dataTables_wrapper div.dataTables_paginate ul.pagination {
@@ -213,10 +214,10 @@ div.dataTables_scrollHead table.table-bordered {
div.table-responsive > div.dataTables_wrapper > div.row {
margin: 0;
}
div.table-responsive > div.dataTables_wrapper > div.row > div[class^="col-"]:first-child {
div.table-responsive > div.dataTables_wrapper > div.row > div[class^=col-]:first-child {
padding-left: 0;
}
div.table-responsive > div.dataTables_wrapper > div.row > div[class^="col-"]:last-child {
div.table-responsive > div.dataTables_wrapper > div.row > div[class^=col-]:last-child {
padding-right: 0;
}

20
src/static/scripts/datatables.js
vendored
@@ -4,20 +4,20 @@
*
* To rebuild or modify this file with the latest versions of the included
* software please visit:
* https://datatables.net/download/#bs4/dt-1.10.22
* https://datatables.net/download/#bs4/dt-1.10.23
*
* Included libraries:
* DataTables 1.10.22
* DataTables 1.10.23
*/

/*! DataTables 1.10.22
/*! DataTables 1.10.23
* ©2008-2020 SpryMedia Ltd - datatables.net/license
*/

/**
* @summary DataTables
* @description Paginate, search and order HTML tables
* @version 1.10.22
* @version 1.10.23
* @file jquery.dataTables.js
* @author SpryMedia Ltd
* @contact www.datatables.net
@@ -2775,7 +2775,7 @@
for ( var i=0, iLen=a.length-1 ; i<iLen ; i++ )
{
// Protect against prototype pollution
if (a[i] === '__proto__') {
if (a[i] === '__proto__' || a[i] === 'constructor') {
throw new Error('Cannot set prototype values');
}

@@ -3157,7 +3157,7 @@
cells.push( nTd );

// Need to create the HTML if new, or if a rendering function is defined
if ( create || ((!nTrIn || oCol.mRender || oCol.mData !== i) &&
if ( create || ((oCol.mRender || oCol.mData !== i) &&
(!$.isPlainObject(oCol.mData) || oCol.mData._ !== i+'.display')
)) {
nTd.innerHTML = _fnGetCellData( oSettings, iRow, i, 'display' );
@@ -3189,10 +3189,6 @@

_fnCallbackFire( oSettings, 'aoRowCreatedCallback', null, [nTr, rowData, iRow, cells] );
}

// Remove once webkit bug 131819 and Chromium bug 365619 have been resolved
// and deployed
row.nTr.setAttribute( 'role', 'row' );
}


@@ -9546,7 +9542,7 @@
* @type string
* @default Version number
*/
DataTable.version = "1.10.22";
DataTable.version = "1.10.23";

/**
* Private data store, containing all of the settings objects that are
@@ -13970,7 +13966,7 @@
*
* @type string
*/
build:"bs4/dt-1.10.22",
build:"bs4/dt-1.10.23",

/**
402
src/static/scripts/md5.js
vendored
@@ -1,402 +0,0 @@
/*
* JavaScript MD5
* https://github.com/blueimp/JavaScript-MD5
*
* Copyright 2011, Sebastian Tschan
* https://blueimp.net
*
* Licensed under the MIT license:
* https://opensource.org/licenses/MIT
*
* Based on
* A JavaScript implementation of the RSA Data Security, Inc. MD5 Message
* Digest Algorithm, as defined in RFC 1321.
* Version 2.2 Copyright (C) Paul Johnston 1999 - 2009
* Other contributors: Greg Holt, Andrew Kepert, Ydnar, Lostinet
* Distributed under the BSD License
* See http://pajhome.org.uk/crypt/md5 for more info.
*/

/* global define */

/* eslint-disable strict */

;(function($) {
'use strict'

/**
* Add integers, wrapping at 2^32.
* This uses 16-bit operations internally to work around bugs in interpreters.
*
* @param {number} x First integer
* @param {number} y Second integer
* @returns {number} Sum
*/
function safeAdd(x, y) {
var lsw = (x & 0xffff) + (y & 0xffff)
var msw = (x >> 16) + (y >> 16) + (lsw >> 16)
return (msw << 16) | (lsw & 0xffff)
}

/**
* Bitwise rotate a 32-bit number to the left.
*
* @param {number} num 32-bit number
* @param {number} cnt Rotation count
* @returns {number} Rotated number
*/
function bitRotateLeft(num, cnt) {
return (num << cnt) | (num >>> (32 - cnt))
}

/**
* Basic operation the algorithm uses.
*
* @param {number} q q
* @param {number} a a
* @param {number} b b
* @param {number} x x
* @param {number} s s
* @param {number} t t
* @returns {number} Result
*/
function md5cmn(q, a, b, x, s, t) {
return safeAdd(bitRotateLeft(safeAdd(safeAdd(a, q), safeAdd(x, t)), s), b)
}
/**
* Basic operation the algorithm uses.
*
* @param {number} a a
* @param {number} b b
* @param {number} c c
* @param {number} d d
* @param {number} x x
* @param {number} s s
* @param {number} t t
* @returns {number} Result
*/
function md5ff(a, b, c, d, x, s, t) {
return md5cmn((b & c) | (~b & d), a, b, x, s, t)
}
/**
* Basic operation the algorithm uses.
*
* @param {number} a a
* @param {number} b b
* @param {number} c c
* @param {number} d d
* @param {number} x x
* @param {number} s s
* @param {number} t t
* @returns {number} Result
*/
function md5gg(a, b, c, d, x, s, t) {
return md5cmn((b & d) | (c & ~d), a, b, x, s, t)
}
/**
* Basic operation the algorithm uses.
*
* @param {number} a a
* @param {number} b b
* @param {number} c c
* @param {number} d d
* @param {number} x x
* @param {number} s s
* @param {number} t t
* @returns {number} Result
*/
function md5hh(a, b, c, d, x, s, t) {
return md5cmn(b ^ c ^ d, a, b, x, s, t)
}
/**
* Basic operation the algorithm uses.
*
* @param {number} a a
* @param {number} b b
* @param {number} c c
* @param {number} d d
* @param {number} x x
* @param {number} s s
* @param {number} t t
* @returns {number} Result
*/
function md5ii(a, b, c, d, x, s, t) {
return md5cmn(c ^ (b | ~d), a, b, x, s, t)
}

/**
* Calculate the MD5 of an array of little-endian words, and a bit length.
*
* @param {Array} x Array of little-endian words
* @param {number} len Bit length
* @returns {Array<number>} MD5 Array
*/
function binlMD5(x, len) {
/* append padding */
x[len >> 5] |= 0x80 << len % 32
x[(((len + 64) >>> 9) << 4) + 14] = len

var i
var olda
var oldb
var oldc
var oldd
var a = 1732584193
var b = -271733879
var c = -1732584194
var d = 271733878

for (i = 0; i < x.length; i += 16) {
olda = a
oldb = b
oldc = c
oldd = d

a = md5ff(a, b, c, d, x[i], 7, -680876936)
d = md5ff(d, a, b, c, x[i + 1], 12, -389564586)
c = md5ff(c, d, a, b, x[i + 2], 17, 606105819)
b = md5ff(b, c, d, a, x[i + 3], 22, -1044525330)
a = md5ff(a, b, c, d, x[i + 4], 7, -176418897)
d = md5ff(d, a, b, c, x[i + 5], 12, 1200080426)
c = md5ff(c, d, a, b, x[i + 6], 17, -1473231341)
b = md5ff(b, c, d, a, x[i + 7], 22, -45705983)
a = md5ff(a, b, c, d, x[i + 8], 7, 1770035416)
d = md5ff(d, a, b, c, x[i + 9], 12, -1958414417)
c = md5ff(c, d, a, b, x[i + 10], 17, -42063)
b = md5ff(b, c, d, a, x[i + 11], 22, -1990404162)
a = md5ff(a, b, c, d, x[i + 12], 7, 1804603682)
d = md5ff(d, a, b, c, x[i + 13], 12, -40341101)
c = md5ff(c, d, a, b, x[i + 14], 17, -1502002290)
b = md5ff(b, c, d, a, x[i + 15], 22, 1236535329)

a = md5gg(a, b, c, d, x[i + 1], 5, -165796510)
d = md5gg(d, a, b, c, x[i + 6], 9, -1069501632)
c = md5gg(c, d, a, b, x[i + 11], 14, 643717713)
b = md5gg(b, c, d, a, x[i], 20, -373897302)
a = md5gg(a, b, c, d, x[i + 5], 5, -701558691)
d = md5gg(d, a, b, c, x[i + 10], 9, 38016083)
c = md5gg(c, d, a, b, x[i + 15], 14, -660478335)
b = md5gg(b, c, d, a, x[i + 4], 20, -405537848)
a = md5gg(a, b, c, d, x[i + 9], 5, 568446438)
d = md5gg(d, a, b, c, x[i + 14], 9, -1019803690)
c = md5gg(c, d, a, b, x[i + 3], 14, -187363961)
b = md5gg(b, c, d, a, x[i + 8], 20, 1163531501)
a = md5gg(a, b, c, d, x[i + 13], 5, -1444681467)
d = md5gg(d, a, b, c, x[i + 2], 9, -51403784)
c = md5gg(c, d, a, b, x[i + 7], 14, 1735328473)
b = md5gg(b, c, d, a, x[i + 12], 20, -1926607734)

a = md5hh(a, b, c, d, x[i + 5], 4, -378558)
d = md5hh(d, a, b, c, x[i + 8], 11, -2022574463)
c = md5hh(c, d, a, b, x[i + 11], 16, 1839030562)
b = md5hh(b, c, d, a, x[i + 14], 23, -35309556)
a = md5hh(a, b, c, d, x[i + 1], 4, -1530992060)
d = md5hh(d, a, b, c, x[i + 4], 11, 1272893353)
c = md5hh(c, d, a, b, x[i + 7], 16, -155497632)
b = md5hh(b, c, d, a, x[i + 10], 23, -1094730640)
a = md5hh(a, b, c, d, x[i + 13], 4, 681279174)
d = md5hh(d, a, b, c, x[i], 11, -358537222)
c = md5hh(c, d, a, b, x[i + 3], 16, -722521979)
b = md5hh(b, c, d, a, x[i + 6], 23, 76029189)
a = md5hh(a, b, c, d, x[i + 9], 4, -640364487)
d = md5hh(d, a, b, c, x[i + 12], 11, -421815835)
c = md5hh(c, d, a, b, x[i + 15], 16, 530742520)
b = md5hh(b, c, d, a, x[i + 2], 23, -995338651)

a = md5ii(a, b, c, d, x[i], 6, -198630844)
d = md5ii(d, a, b, c, x[i + 7], 10, 1126891415)
c = md5ii(c, d, a, b, x[i + 14], 15, -1416354905)
b = md5ii(b, c, d, a, x[i + 5], 21, -57434055)
a = md5ii(a, b, c, d, x[i + 12], 6, 1700485571)
d = md5ii(d, a, b, c, x[i + 3], 10, -1894986606)
c = md5ii(c, d, a, b, x[i + 10], 15, -1051523)
b = md5ii(b, c, d, a, x[i + 1], 21, -2054922799)
a = md5ii(a, b, c, d, x[i + 8], 6, 1873313359)
d = md5ii(d, a, b, c, x[i + 15], 10, -30611744)
c = md5ii(c, d, a, b, x[i + 6], 15, -1560198380)
b = md5ii(b, c, d, a, x[i + 13], 21, 1309151649)
a = md5ii(a, b, c, d, x[i + 4], 6, -145523070)
d = md5ii(d, a, b, c, x[i + 11], 10, -1120210379)
c = md5ii(c, d, a, b, x[i + 2], 15, 718787259)
b = md5ii(b, c, d, a, x[i + 9], 21, -343485551)

a = safeAdd(a, olda)
b = safeAdd(b, oldb)
c = safeAdd(c, oldc)
d = safeAdd(d, oldd)
}
return [a, b, c, d]
}

/**
* Convert an array of little-endian words to a string
*
* @param {Array<number>} input MD5 Array
* @returns {string} MD5 string
*/
function binl2rstr(input) {
var i
var output = ''
var length32 = input.length * 32
for (i = 0; i < length32; i += 8) {
output += String.fromCharCode((input[i >> 5] >>> i % 32) & 0xff)
}
return output
}

/**
* Convert a raw string to an array of little-endian words
* Characters >255 have their high-byte silently ignored.
*
* @param {string} input Raw input string
* @returns {Array<number>} Array of little-endian words
*/
function rstr2binl(input) {
var i
var output = []
output[(input.length >> 2) - 1] = undefined
for (i = 0; i < output.length; i += 1) {
output[i] = 0
}
var length8 = input.length * 8
for (i = 0; i < length8; i += 8) {
output[i >> 5] |= (input.charCodeAt(i / 8) & 0xff) << i % 32
}
return output
}

/**
* Calculate the MD5 of a raw string
*
* @param {string} s Input string
* @returns {string} Raw MD5 string
*/
function rstrMD5(s) {
return binl2rstr(binlMD5(rstr2binl(s), s.length * 8))
}

/**
* Calculates the HMAC-MD5 of a key and some data (raw strings)
*
* @param {string} key HMAC key
* @param {string} data Raw input string
* @returns {string} Raw MD5 string
*/
function rstrHMACMD5(key, data) {
var i
var bkey = rstr2binl(key)
var ipad = []
var opad = []
var hash
ipad[15] = opad[15] = undefined
if (bkey.length > 16) {
bkey = binlMD5(bkey, key.length * 8)
}
for (i = 0; i < 16; i += 1) {
ipad[i] = bkey[i] ^ 0x36363636
opad[i] = bkey[i] ^ 0x5c5c5c5c
}
hash = binlMD5(ipad.concat(rstr2binl(data)), 512 + data.length * 8)
return binl2rstr(binlMD5(opad.concat(hash), 512 + 128))
}

/**
* Convert a raw string to a hex string
*
* @param {string} input Raw input string
* @returns {string} Hex encoded string
*/
function rstr2hex(input) {
var hexTab = '0123456789abcdef'
var output = ''
var x
var i
for (i = 0; i < input.length; i += 1) {
x = input.charCodeAt(i)
output += hexTab.charAt((x >>> 4) & 0x0f) + hexTab.charAt(x & 0x0f)
}
return output
}

/**
* Encode a string as UTF-8
*
* @param {string} input Input string
* @returns {string} UTF8 string
*/
function str2rstrUTF8(input) {
return unescape(encodeURIComponent(input))
}

/**
* Encodes input string as raw MD5 string
*
* @param {string} s Input string
* @returns {string} Raw MD5 string
*/
function rawMD5(s) {
return rstrMD5(str2rstrUTF8(s))
}
/**
* Encodes input string as Hex encoded string
*
* @param {string} s Input string
* @returns {string} Hex encoded string
*/
function hexMD5(s) {
return rstr2hex(rawMD5(s))
}
/**
* Calculates the raw HMAC-MD5 for the given key and data
*
* @param {string} k HMAC key
* @param {string} d Input string
* @returns {string} Raw MD5 string
*/
function rawHMACMD5(k, d) {
return rstrHMACMD5(str2rstrUTF8(k), str2rstrUTF8(d))
}
/**
* Calculates the Hex encoded HMAC-MD5 for the given key and data
*
* @param {string} k HMAC key
* @param {string} d Input string
* @returns {string} Raw MD5 string
*/
function hexHMACMD5(k, d) {
return rstr2hex(rawHMACMD5(k, d))
}

/**
* Calculates MD5 value for a given string.
* If a key is provided, calculates the HMAC-MD5 value.
* Returns a Hex encoded string unless the raw argument is given.
*
* @param {string} string Input string
* @param {string} [key] HMAC key
* @param {boolean} [raw] Raw output switch
* @returns {string} MD5 output
*/
function md5(string, key, raw) {
if (!key) {
if (!raw) {
return hexMD5(string)
}
return rawMD5(string)
}
if (!raw) {
return hexHMACMD5(key, string)
}
return rawHMACMD5(key, string)
}

if (typeof define === 'function' && define.amd) {
define(function() {
return md5
})
} else if (typeof module === 'object' && module.exports) {
module.exports = md5
} else {
$.md5 = md5
}
})(this)
@@ -4,7 +4,8 @@
<meta http-equiv="content-type" content="text/html; charset=UTF-8" />
<meta name="viewport" content="width=device-width, initial-scale=1, shrink-to-fit=no" />
<meta name="robots" content="noindex,nofollow" />
<title>Bitwarden_rs Admin Panel</title>
<link rel="icon" type="image/png" href="{{urlpath}}/bwrs_static/shield-white.png">
<title>Vaultwarden Admin Panel</title>
<link rel="stylesheet" href="{{urlpath}}/bwrs_static/bootstrap.css" />
<style>
body {
@@ -19,7 +20,6 @@
width: auto;
}
</style>
<script src="{{urlpath}}/bwrs_static/md5.js"></script>
<script src="{{urlpath}}/bwrs_static/identicon.js"></script>
<script>
function reload() { window.location.reload(); }
@@ -27,8 +27,17 @@
text && alert(text);
reload_page && reload();
}
function identicon(email) {
const data = new Identicon(md5(email), { size: 48, format: 'svg' });
async function sha256(message) {
// https://developer.mozilla.org/en-US/docs/Web/API/SubtleCrypto/digest
const msgUint8 = new TextEncoder().encode(message);
const hashBuffer = await crypto.subtle.digest('SHA-256', msgUint8);
const hashArray = Array.from(new Uint8Array(hashBuffer));
const hashHex = hashArray.map(b => b.toString(16).padStart(2, '0')).join('');
return hashHex;
}
async function identicon(email) {
const hash = await sha256(email);
const data = new Identicon(hash, { size: 48, format: 'svg' });
return "data:image/svg+xml;base64," + data.toString();
}
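The identicon seed above switches from the md5.js helper to the browser's built-in SubtleCrypto SHA-256, which is what allows the vendored md5.js to be deleted entirely. For comparison, the server-side equivalent of that hex digest in Rust would look roughly like this (illustrative only; Vaultwarden does this hashing in the browser, and the sha2 and hex crates here are assumptions):

    use sha2::{Digest, Sha256};

    // Produces the same lowercase hex string as the template's sha256() JS helper.
    fn email_hash_hex(email: &str) -> String {
        let digest = Sha256::digest(email.as_bytes());
        hex::encode(digest)
    }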
function toggleVis(input_id) {
@@ -73,8 +82,8 @@

<body class="bg-light">
<nav class="navbar navbar-expand-md navbar-dark bg-dark mb-4 shadow fixed-top">
<div class="container">
<a class="navbar-brand" href="{{urlpath}}/admin"><img class="pr-1" src="{{urlpath}}/bwrs_static/shield-white.png">Bitwarden_rs Admin</a>
<div class="container-xl">
<a class="navbar-brand" href="{{urlpath}}/admin"><img class="pr-1" src="{{urlpath}}/bwrs_static/shield-white.png">Vaultwarden Admin</a>
<button class="navbar-toggler" type="button" data-toggle="collapse" data-target="#navbarCollapse"
aria-controls="navbarCollapse" aria-expanded="false" aria-label="Toggle navigation">
<span class="navbar-toggler-icon"></span>
@@ -96,7 +105,7 @@
</li>
{{/if}}
<li class="nav-item">
<a class="nav-link" href="{{urlpath}}/">Vault</a>
<a class="nav-link" href="{{urlpath}}/" target="_blank" rel="noreferrer">Vault</a>
</li>
</ul>

@@ -124,4 +133,4 @@
<!-- This script needs to be at the bottom, else it will fail! -->
<script src="{{urlpath}}/bwrs_static/bootstrap-native.js"></script>
</body>
</html>
</html>
@@ -1,8 +1,8 @@
<main class="container">
<main class="container-xl">
<div id="diagnostics-block" class="my-3 p-3 bg-white rounded shadow">
<h6 class="border-bottom pb-2 mb-2">Diagnostics</h6>

<h3>Version</h3>
<h3>Versions</h3>
<div class="row">
<div class="col-md">
<dl class="row">
@@ -15,11 +15,12 @@
<span id="server-installed">{{version}}</span>
</dd>
<dt class="col-sm-5">Server Latest
<span class="badge badge-danger d-none" id="server-failed" title="Unable to determine latest version.">Unknown</span>
<span class="badge badge-secondary d-none" id="server-failed" title="Unable to determine latest version.">Unknown</span>
</dt>
<dd class="col-sm-7">
<span id="server-latest">{{diagnostics.latest_release}}<span id="server-latest-commit" class="d-none">-{{diagnostics.latest_commit}}</span></span>
</dd>
{{#if diagnostics.web_vault_enabled}}
<dt class="col-sm-5">Web Installed
<span class="badge badge-success d-none" id="web-success" title="Latest version is installed.">Ok</span>
<span class="badge badge-warning d-none" id="web-warning" title="There seems to be an update available.">Update</span>
@@ -27,12 +28,25 @@
<dd class="col-sm-7">
<span id="web-installed">{{diagnostics.web_vault_version}}</span>
</dd>
{{#unless diagnostics.running_within_docker}}
<dt class="col-sm-5">Web Latest
<span class="badge badge-danger d-none" id="web-failed" title="Unable to determine latest version.">Unknown</span>
<span class="badge badge-secondary d-none" id="web-failed" title="Unable to determine latest version.">Unknown</span>
</dt>
<dd class="col-sm-7">
<span id="web-latest">{{diagnostics.latest_web_build}}</span>
</dd>
{{/unless}}
{{/if}}
{{#unless diagnostics.web_vault_enabled}}
<dt class="col-sm-5">Web Installed</dt>
<dd class="col-sm-7">
<span id="web-installed">Web Vault is disabled</span>
</dd>
{{/unless}}
<dt class="col-sm-5">Database</dt>
<dd class="col-sm-7">
<span><b>{{diagnostics.db_type}}:</b> {{diagnostics.db_version}}</span>
</dd>
</dl>
</div>
</div>
@@ -41,6 +55,70 @@
<div class="row">
<div class="col-md">
<dl class="row">
<dt class="col-sm-5">Running within Docker</dt>
<dd class="col-sm-7">
{{#if diagnostics.running_within_docker}}
<span class="d-block"><b>Yes</b></span>
{{/if}}
{{#unless diagnostics.running_within_docker}}
<span class="d-block"><b>No</b></span>
{{/unless}}
</dd>
<dt class="col-sm-5">Uses a reverse proxy</dt>
<dd class="col-sm-7">
{{#if diagnostics.ip_header_exists}}
<span class="d-block" title="IP Header found."><b>Yes</b></span>
{{/if}}
{{#unless diagnostics.ip_header_exists}}
<span class="d-block" title="No IP Header found."><b>No</b></span>
{{/unless}}
</dd>
{{!-- Only show this if the IP Header Exists --}}
{{#if diagnostics.ip_header_exists}}
<dt class="col-sm-5">IP header
{{#if diagnostics.ip_header_match}}
<span class="badge badge-success" title="IP_HEADER config seems to be valid.">Match</span>
{{/if}}
{{#unless diagnostics.ip_header_match}}
<span class="badge badge-danger" title="IP_HEADER config seems to be invalid. IPs in the log could be wrong. Please fix.">No Match</span>
{{/unless}}
</dt>
<dd class="col-sm-7">
{{#if diagnostics.ip_header_match}}
<span class="d-block"><b>Config/Server:</b> {{ diagnostics.ip_header_name }}</span>
{{/if}}
{{#unless diagnostics.ip_header_match}}
<span class="d-block"><b>Config:</b> {{ diagnostics.ip_header_config }}</span>
<span class="d-block"><b>Server:</b> {{ diagnostics.ip_header_name }}</span>
{{/unless}}
</dd>
{{/if}}
{{!-- End if IP Header Exists --}}
<dt class="col-sm-5">Internet access
{{#if diagnostics.has_http_access}}
<span class="badge badge-success" title="We have internet access!">Ok</span>
{{/if}}
{{#unless diagnostics.has_http_access}}
<span class="badge badge-danger" title="There seems to be no internet access. Please fix.">Error</span>
{{/unless}}
</dt>
<dd class="col-sm-7">
{{#if diagnostics.has_http_access}}
<span class="d-block"><b>Yes</b></span>
{{/if}}
{{#unless diagnostics.has_http_access}}
<span class="d-block"><b>No</b></span>
{{/unless}}
</dd>
<dt class="col-sm-5">Internet access via a proxy</dt>
<dd class="col-sm-7">
{{#if diagnostics.uses_proxy}}
<span class="d-block" title="Internet access goes via a proxy (HTTPS_PROXY or HTTP_PROXY is configured)."><b>Yes</b></span>
{{/if}}
{{#unless diagnostics.uses_proxy}}
<span class="d-block" title="We have direct internet access, no outgoing proxy configured."><b>No</b></span>
{{/unless}}
</dd>
<dt class="col-sm-5">DNS (github.com)
<span class="badge badge-success d-none" id="dns-success" title="DNS Resolving works!">Ok</span>
<span class="badge badge-danger d-none" id="dns-warning" title="DNS Resolving failed. Please fix.">Error</span>
@@ -48,7 +126,10 @@
<dd class="col-sm-7">
<span id="dns-resolved">{{diagnostics.dns_resolved}}</span>
</dd>

<dt class="col-sm-5">Date & Time (Local)</dt>
<dd class="col-sm-7">
<span><b>Server:</b> {{diagnostics.server_time_local}}</span>
</dd>
<dt class="col-sm-5">Date & Time (UTC)
<span class="badge badge-success d-none" id="time-success" title="Time offsets seem to be correct.">Ok</span>
<span class="badge badge-danger d-none" id="time-warning" title="Time offsets are drifting too much.">Error</span>
@@ -57,6 +138,46 @@
<span id="time-server" class="d-block"><b>Server:</b> <span id="time-server-string">{{diagnostics.server_time}}</span></span>
<span id="time-browser" class="d-block"><b>Browser:</b> <span id="time-browser-string"></span></span>
</dd>

<dt class="col-sm-5">Domain configuration
<span class="badge badge-success d-none" id="domain-success" title="The domain variable matches the browser location and seems to be configured correctly.">Match</span>
<span class="badge badge-danger d-none" id="domain-warning" title="The domain variable does not match the browser's location.
The domain variable does not seem to be configured correctly.
Some features may not work as expected!">No Match</span>
<span class="badge badge-success d-none" id="https-success" title="Configured to use HTTPS">HTTPS</span>
<span class="badge badge-danger d-none" id="https-warning" title="Not configured to use HTTPS.
Some features may not work as expected!">No HTTPS</span>
</dt>
<dd class="col-sm-7">
<span id="domain-server" class="d-block"><b>Server:</b> <span id="domain-server-string">{{diagnostics.admin_url}}</span></span>
<span id="domain-browser" class="d-block"><b>Browser:</b> <span id="domain-browser-string"></span></span>
</dd>
</dl>
</div>
</div>

<h3>Support</h3>
<div class="row">
<div class="col-md">
<dl class="row">
<dd class="col-sm-12">
If you need support, please check the following links first before you create a new issue:
<a href="https://bitwardenrs.discourse.group/" target="_blank" rel="noreferrer">Vaultwarden Forum</a>
| <a href="https://github.com/dani-garcia/vaultwarden/discussions" target="_blank" rel="noreferrer">Github Discussions</a>
</dd>
</dl>
<dl class="row">
<dd class="col-sm-12">
You can use the button below to pre-generate a string which you can copy/paste into a Forum post or a new GitHub issue.<br>
We try to hide the most sensitive values from the generated support string by default, but please verify that it contains nothing you want to keep private!<br>
</dd>
</dl>
<dl class="row">
<dt class="col-sm-3">
<button type="button" id="gen-support" class="btn btn-primary" onclick="generateSupportString(); return false;">Generate Support String</button>
<br><br>
<button type="button" id="copy-support" class="btn btn-info d-none" onclick="copyToClipboard(); return false;">Copy To Clipboard</button>
</dt>
<dd class="col-sm-9">
<pre id="support-string" class="pre-scrollable d-none" style="width: 100%; height: 16em; size: 0.6em; border: 1px solid; padding: 4px;"></pre>
</dd>
</dl>
</div>
</div>
@@ -64,7 +185,13 @@
</main>

<script>
dnsCheck = false;
timeCheck = false;
domainCheck = false;
httpsCheck = false;
(() => {
// ================================
// Date & Time Check
const d = new Date();
const year = d.getUTCFullYear();
const month = String(d.getUTCMonth()+1).padStart(2, '0');
@@ -81,16 +208,21 @@
document.getElementById('time-warning').classList.remove('d-none');
} else {
document.getElementById('time-success').classList.remove('d-none');
timeCheck = true;
}

// ================================
// Check if the output is a valid IP
const isValidIp = value => (/^(?:(?:^|\.)(?:2(?:5[0-5]|[0-4]\d)|1?\d?\d)){4}$/.test(value) ? true : false);
if (isValidIp(document.getElementById('dns-resolved').innerText)) {
document.getElementById('dns-success').classList.remove('d-none');
dnsCheck = true;
} else {
document.getElementById('dns-warning').classList.remove('d-none');
}

// ================================
// Version check for both vaultwarden and web-vault
let serverInstalled = document.getElementById('server-installed').innerText;
let serverLatest = document.getElementById('server-latest').innerText;
let serverLatestCommit = document.getElementById('server-latest-commit').innerText.replace('-', '');
@@ -99,10 +231,12 @@
}

const webInstalled = document.getElementById('web-installed').innerText;
const webLatest = document.getElementById('web-latest').innerText;

checkVersions('server', serverInstalled, serverLatest, serverLatestCommit);

{{#unless diagnostics.running_within_docker}}
const webLatest = document.getElementById('web-latest').innerText;
checkVersions('web', webInstalled, webLatest);
{{/unless}}

function checkVersions(platform, installed, latest, commit=null) {
if (installed === '-' || latest === '-') {
@@ -146,5 +280,70 @@
}
}
}

// ================================
// Check valid DOMAIN configuration
document.getElementById('domain-browser-string').innerText = location.href.toLowerCase();
if (document.getElementById('domain-server-string').innerText.toLowerCase() == location.href.toLowerCase()) {
document.getElementById('domain-success').classList.remove('d-none');
domainCheck = true;
} else {
document.getElementById('domain-warning').classList.remove('d-none');
}

// Check for HTTPS at domain-server-string
if (document.getElementById('domain-server-string').innerText.toLowerCase().startsWith('https://') ) {
document.getElementById('https-success').classList.remove('d-none');
httpsCheck = true;
} else {
document.getElementById('https-warning').classList.remove('d-none');
}
})();

// ================================
// Generate support string to be pasted on GitHub or the forum
async function generateSupportString() {
supportString = "### Your environment (Generated via diagnostics page)\n";

supportString += "* Vaultwarden version: v{{ version }}\n";
supportString += "* Web-vault version: v{{ diagnostics.web_vault_version }}\n";
supportString += "* Running within Docker: {{ diagnostics.running_within_docker }}\n";
supportString += "* Uses a reverse proxy: {{ diagnostics.ip_header_exists }}\n";
{{#if diagnostics.ip_header_exists}}
supportString += "* IP Header check: {{ diagnostics.ip_header_match }} ({{ diagnostics.ip_header_name }})\n";
{{/if}}
supportString += "* Internet access: {{ diagnostics.has_http_access }}\n";
supportString += "* Internet access via a proxy: {{ diagnostics.uses_proxy }}\n";
supportString += "* DNS Check: " + dnsCheck + "\n";
supportString += "* Time Check: " + timeCheck + "\n";
supportString += "* Domain Configuration Check: " + domainCheck + "\n";
supportString += "* HTTPS Check: " + httpsCheck + "\n";
supportString += "* Database type: {{ diagnostics.db_type }}\n";
supportString += "* Database version: {{ diagnostics.db_version }}\n";
supportString += "* Clients used: \n";
supportString += "* Reverse proxy and version: \n";
supportString += "* Other relevant information: \n";

jsonResponse = await fetch('{{urlpath}}/admin/diagnostics/config');
configJson = await jsonResponse.json();
supportString += "\n### Config (Generated via diagnostics page)\n```json\n" + JSON.stringify(configJson, undefined, 2) + "\n```\n";

document.getElementById('support-string').innerText = supportString;
document.getElementById('support-string').classList.remove('d-none');
document.getElementById('copy-support').classList.remove('d-none');
}

function copyToClipboard() {
const str = document.getElementById('support-string').innerText;
const el = document.createElement('textarea');
el.value = str;
el.setAttribute('readonly', '');
el.style.position = 'absolute';
el.style.left = '-9999px';
document.body.appendChild(el);
el.select();
document.execCommand('copy');
document.body.removeChild(el);
}

</script>
@@ -1,4 +1,4 @@
<main class="container">
<main class="container-xl">
{{#if error}}
<div class="align-items-center p-3 mb-3 text-white-50 bg-warning rounded shadow">
<div>
@@ -1,4 +1,4 @@
<main class="container">
<main class="container-xl">
    <div id="organizations-block" class="my-3 p-3 bg-white rounded shadow">
        <h6 class="border-bottom pb-2 mb-3">Organizations</h6>

@@ -10,6 +10,7 @@
                    <th>Users</th>
                    <th>Items</th>
                    <th>Attachments</th>
                    <th style="width: 120px; min-width: 120px;">Actions</th>
                </tr>
            </thead>
            <tbody>

@@ -37,6 +38,9 @@
                        <span class="d-block"><strong>Size:</strong> {{attachment_size}}</span>
                        {{/if}}
                    </td>
                    <td style="font-size: 90%; text-align: right; padding-right: 15px">
                        <a class="d-block" href="#" onclick='deleteOrganization({{jsesc Id}}, {{jsesc Name}}, {{jsesc BillingEmail}})'>Delete Organization</a>
                    </td>
                </tr>
                {{/each}}
            </tbody>

@@ -50,15 +54,39 @@
<script src="{{urlpath}}/bwrs_static/jquery-3.5.1.slim.js"></script>
<script src="{{urlpath}}/bwrs_static/datatables.js"></script>
<script>
    document.querySelectorAll("img.identicon").forEach(function (e, i) {
        e.src = identicon(e.dataset.src);
    });
    function deleteOrganization(id, name, billing_email) {
        // First make sure the user wants to delete this organization
        var continueDelete = confirm("WARNING: All data of this organization ("+ name +") will be lost!\nMake sure you have a backup, this cannot be undone!");
        if (continueDelete == true) {
            var input_org_uuid = prompt("To delete the organization '" + name + " (" + billing_email +")', please type the organization uuid below.")
            if (input_org_uuid != null) {
                if (input_org_uuid == id) {
                    _post("{{urlpath}}/admin/organizations/" + id + "/delete",
                        "Organization deleted correctly",
                        "Error deleting organization");
                } else {
                    alert("Wrong organization uuid, please try again")
                }
            }
        }

        return false;
    }
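The deletion flow above deliberately stacks two gates: a confirm() warning, then a prompt() that makes the admin retype the organization's uuid before _post() fires. Factored out, the guard looks roughly like this — confirmTyped() is a hypothetical helper for illustration, not something the template defines:

```js
// Sketch of the double-confirmation guard inlined in deleteOrganization().
function confirmTyped(warning, question, expected) {
    if (!confirm(warning)) return false;  // first gate: generic warning
    const typed = prompt(question);
    if (typed === null) return false;     // admin cancelled the prompt
    if (typed !== expected) {             // second gate: retyped uuid must match
        alert("Wrong organization uuid, please try again");
        return false;
    }
    return true;
}
```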
    (async () => {
        for (let e of document.querySelectorAll("img.identicon")) {
            e.src = await identicon(e.dataset.src);
        }
    })();

    document.addEventListener("DOMContentLoaded", function(event) {
        $('#orgs-table').DataTable({
            "responsive": true,
            "lengthMenu": [ [-1, 5, 10, 25, 50], ["All", 5, 10, 25, 50] ],
            "pageLength": -1, // Default show all
            "columnDefs": [
                { "targets": 4, "searchable": false, "orderable": false }
            ]
        });
    });
</script>
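The identicon loop above changed shape because identicon() evidently now returns a Promise: forEach cannot await its callback, so the commit wraps a for...of loop in an async IIFE, resolving one image at a time. The same pattern, annotated:

```js
// Why for...of instead of forEach: an async forEach callback would fire every
// iteration at once with nothing awaiting it; for...of awaits each identicon
// before starting the next one.
(async () => {
    for (const img of document.querySelectorAll("img.identicon")) {
        img.src = await identicon(img.dataset.src);
    }
})();
```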
@@ -1,4 +1,4 @@
<main class="container">
<main class="container-xl">
    <div id="config-block" class="align-items-center p-3 mb-3 bg-secondary rounded shadow">
        <div>
            <h6 class="text-white mb-3">Configuration</h6>

@@ -116,7 +116,11 @@
                    data-target="#g_database">Backup Database</button></div>
                <div id="g_database" class="card-body collapse" data-parent="#config-form">
                    <div class="small mb-3">
                        NOTE: A local installation of sqlite3 is required for this section to work.
                        WARNING: This function only creates a backup copy of the SQLite database.
                        This does not include any configuration or file attachment data that may
                        also be needed to fully restore a vaultwarden instance. For details on
                        how to perform complete backups, refer to the wiki page on
                        <a href="https://github.com/dani-garcia/vaultwarden/wiki/Backing-up-your-vault">backups</a>.
                    </div>
                    <button type="button" class="btn btn-primary" onclick="backupDatabase();">Backup Database</button>
                </div>
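Per the note above, the backup button only copies the SQLite database and requires a locally installed sqlite3 binary. For reference, an external backup in the same spirit can use SQLite's online-backup dot-command, which yields a consistent copy even while the server holds the database open — a sketch only, with the usual data-folder paths assumed rather than guaranteed:

```js
// Sketch: external SQLite backup, roughly what the admin button automates.
const { execFileSync } = require("child_process");

execFileSync("sqlite3", [
    "data/db.sqlite3",                 // adjust to your data folder / DATABASE_URL
    ".backup data/db_backup.sqlite3",  // SQLite online-backup dot-command
]);
```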
@@ -1,4 +1,4 @@
<main class="container">
<main class="container-xl">
    <div id="users-block" class="my-3 p-3 bg-white rounded shadow">
        <h6 class="border-bottom pb-2 mb-3">Registered Users</h6>

@@ -7,10 +7,12 @@
            <thead>
                <tr>
                    <th>User</th>
                    <th style="width:60px; min-width: 60px;">Items</th>
                    <th style="width:65px; min-width: 65px;">Created at</th>
                    <th style="width:70px; min-width: 65px;">Last Active</th>
                    <th style="width:35px; min-width: 35px;">Items</th>
                    <th>Attachments</th>
                    <th style="min-width: 120px;">Organizations</th>
                    <th style="width: 140px; min-width: 140px;">Actions</th>
                    <th style="width: 120px; min-width: 120px;">Actions</th>
                </tr>
            </thead>
            <tbody>

@@ -21,8 +23,6 @@
                        <div class="float-left">
                            <strong>{{Name}}</strong>
                            <span class="d-block">{{Email}}</span>
                            <span class="d-block">Created at: {{created_at}}</span>
                            <span class="d-block">Last active: {{last_active}}</span>
                            <span class="d-block">
                                {{#unless user_enabled}}
                                <span class="badge badge-danger mr-2" title="User is disabled">Disabled</span>

@@ -39,6 +39,12 @@
                            </span>
                        </div>
                    </td>
                    <td>
                        <span class="d-block">{{created_at}}</span>
                    </td>
                    <td>
                        <span class="d-block">{{last_active}}</span>
                    </td>
                    <td>
                        <span class="d-block">{{cipher_count}}</span>
                    </td>

@@ -49,9 +55,11 @@
                        {{/if}}
                    </td>
                    <td>
                        <div class="overflow-auto" style="max-height: 120px;">
                            {{#each Organizations}}
                            <span class="badge badge-primary" data-orgtype="{{Type}}">{{Name}}</span>
                            <button class="badge badge-primary" data-toggle="modal" data-target="#userOrgTypeDialog" data-orgtype="{{Type}}" data-orguuid="{{jsesc Id no_quote}}" data-orgname="{{jsesc Name no_quote}}" data-useremail="{{jsesc ../Email no_quote}}" data-useruuid="{{jsesc ../Id no_quote}}">{{Name}}</button>
                            {{/each}}
                        </div>
                    </td>
                    <td style="font-size: 90%; text-align: right; padding-right: 15px">
                        {{#if TwoFactorEnabled}}

@@ -92,6 +100,41 @@
        </form>
    </div>
</div>

<div id="userOrgTypeDialog" class="modal fade" tabindex="-1" role="dialog" aria-hidden="true">
    <div class="modal-dialog modal-dialog-centered modal-sm">
        <div class="modal-content">
            <div class="modal-header">
                <h6 class="modal-title" id="userOrgTypeDialogTitle"></h6>
                <button type="button" class="close" data-dismiss="modal" aria-label="Close">
                    <span aria-hidden="true">&times;</span>
                </button>
            </div>
            <form class="form" id="userOrgTypeForm" onsubmit="updateUserOrgType(); return false;">
                <input type="hidden" name="user_uuid" id="userOrgTypeUserUuid" value="">
                <input type="hidden" name="org_uuid" id="userOrgTypeOrgUuid" value="">
                <div class="modal-body">
                    <div class="radio">
                        <label><input type="radio" value="2" class="form-radio-input" name="user_type" id="userOrgTypeUser"> User</label>
                    </div>
                    <div class="radio">
                        <label><input type="radio" value="3" class="form-radio-input" name="user_type" id="userOrgTypeManager"> Manager</label>
                    </div>
                    <div class="radio">
                        <label><input type="radio" value="1" class="form-radio-input" name="user_type" id="userOrgTypeAdmin"> Admin</label>
                    </div>
                    <div class="radio">
                        <label><input type="radio" value="0" class="form-radio-input" name="user_type" id="userOrgTypeOwner"> Owner</label>
                    </div>
                </div>
                <div class="modal-footer">
                    <button type="button" class="btn btn-sm btn-secondary" data-dismiss="modal">Cancel</button>
                    <button type="submit" class="btn btn-sm btn-primary">Change Role</button>
                </div>
            </form>
        </div>
    </div>
</div>
</main>

<link rel="stylesheet" href="{{urlpath}}/bwrs_static/datatables.css" />
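Each organization badge above is now a <button> whose data-* attributes carry everything the role dialog needs (org uuid and name, user uuid and email, escaped via the jsesc helper). The modal's show event exposes the clicked trigger as event.relatedTarget, which is how the script further down fills the form without any per-row JavaScript. The wiring in isolation:

```js
// Sketch: reading the clicked badge's data-* payload when the dialog opens.
document.getElementById('userOrgTypeDialog').addEventListener('show.bs.modal', function (event) {
    const badge = event.relatedTarget;  // the badge <button> that was clicked
    console.log(badge.dataset.orguuid, badge.dataset.useremail);
});
```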
@@ -163,9 +206,11 @@
        "3": { "name": "Manager", "color": "green" },
    };

    document.querySelectorAll("img.identicon").forEach(function (e, i) {
        e.src = identicon(e.dataset.src);
    });
    (async () => {
        for (let e of document.querySelectorAll("img.identicon")) {
            e.src = await identicon(e.dataset.src);
        }
    })();

    document.querySelectorAll("[data-orgtype]").forEach(function (e, i) {
        let orgtype = OrgTypes[e.dataset.orgtype];

@@ -173,18 +218,76 @@
        e.title = orgtype.name;
    });

    // Special sort function to sort dates in ISO format
    jQuery.extend( jQuery.fn.dataTableExt.oSort, {
        "date-iso-pre": function ( a ) {
            let x;
            let sortDate = a.replace(/(<([^>]+)>)/gi, "").trim();
            if ( sortDate !== '' ) {
                let dtParts = sortDate.split(' ');
                var timeParts = (undefined != dtParts[1]) ? dtParts[1].split(':') : [00,00,00];
                var dateParts = dtParts[0].split('-');
                x = (dateParts[0] + dateParts[1] + dateParts[2] + timeParts[0] + timeParts[1] + ((undefined != timeParts[2]) ? timeParts[2] : 0)) * 1;
                if ( isNaN(x) ) {
                    x = 0;
                }
            } else {
                x = Infinity;
            }
            return x;
        },

        "date-iso-asc": function ( a, b ) {
            return a - b;
        },

        "date-iso-desc": function ( a, b ) {
            return b - a;
        }
    });
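The block above registers a sort plugin in DataTables' legacy oSort format: "date-iso-pre" converts each cell (after stripping any HTML tags) into a numeric key, and the -asc/-desc callbacks simply subtract keys. Traced by hand, the key is the cell's digit groups concatenated:

```js
// Worked examples of the "date-iso-pre" key (same logic as above):
// "2021-04-01 12:30:05" -> "20210401" + "12" + "30" + "05" -> 20210401123005
// "2021-04-01"          -> "20210401" + 0 + 0 + 0          -> 20210401000
// ""                    -> Infinity  (empty cells sort last in ascending order)
// unparseable text      -> 0         (the isNaN guard)
// Note the date-only key has fewer digits than the date+time key, so ordering
// is only reliable while a column renders one consistent timestamp format.
```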
    document.addEventListener("DOMContentLoaded", function(event) {
        $('#users-table').DataTable({
            "responsive": true,
            "lengthMenu": [ [-1, 5, 10, 25, 50], ["All", 5, 10, 25, 50] ],
            "pageLength": -1, // Default show all
            "columns": [
                null, // Userdata
                null, // Items
                null, // Attachments
                null, // Organizations
                { "searchable": false, "orderable": false }, // Actions
            ],
            "columnDefs": [
                { "targets": [1,2], "type": "date-iso" },
                { "targets": 6, "searchable": false, "orderable": false }
            ]
        });
    });
</script>

    var userOrgTypeDialog = document.getElementById('userOrgTypeDialog');
    // Fill the form and title
    userOrgTypeDialog.addEventListener('show.bs.modal', function(event){
        let userOrgType = event.relatedTarget.getAttribute("data-orgtype");
        let userOrgTypeName = OrgTypes[userOrgType]["name"];
        let orgName = event.relatedTarget.getAttribute("data-orgname");
        let userEmail = event.relatedTarget.getAttribute("data-useremail");
        let orgUuid = event.relatedTarget.getAttribute("data-orguuid");
        let userUuid = event.relatedTarget.getAttribute("data-useruuid");

        document.getElementById("userOrgTypeDialogTitle").innerHTML = "<b>Update User Type:</b><br><b>Organization:</b> " + orgName + "<br><b>User:</b> " + userEmail;
        document.getElementById("userOrgTypeUserUuid").value = userUuid;
        document.getElementById("userOrgTypeOrgUuid").value = orgUuid;
        document.getElementById("userOrgType"+userOrgTypeName).checked = true;
    }, false);

    // Prevent accidental submission of the form with valid elements after the modal has been hidden.
    userOrgTypeDialog.addEventListener('hide.bs.modal', function(event){
        document.getElementById("userOrgTypeDialogTitle").innerHTML = '';
        document.getElementById("userOrgTypeUserUuid").value = '';
        document.getElementById("userOrgTypeOrgUuid").value = '';
    }, false);

    function updateUserOrgType() {
        let orgForm = document.getElementById("userOrgTypeForm");
        const data = JSON.stringify(Object.fromEntries(new FormData(orgForm).entries()));

        _post("{{urlpath}}/admin/users/org_type",
            "Updated organization type of the user successfully",
            "Error updating organization type of the user", data);
        return false;
    }
</script>
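updateUserOrgType() serializes the hidden form with FormData, flattens it with Object.fromEntries, and posts the result as a JSON string. For a user being set back to a plain organization user (radio value "2"), the body would look like this — the uuids are placeholders:

```js
// Hypothetical JSON body sent to {{urlpath}}/admin/users/org_type:
// {
//   "user_uuid": "00000000-0000-0000-0000-000000000000",
//   "org_uuid": "11111111-1111-1111-1111-111111111111",
//   "user_type": "2"
// }
// FormData values are strings, so user_type arrives as "2", not the number 2.
```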
@@ -5,4 +5,4 @@ To finalize changing your email address enter the following code in web vault: {
If you did not try to change an email address, you can safely ignore this email.

===
Github: https://github.com/dani-garcia/bitwarden_rs
Github: https://github.com/dani-garcia/vaultwarden
@@ -4,7 +4,7 @@ Your Email Change
<head>
    <meta name="viewport" content="width=device-width" />
    <meta http-equiv="Content-Type" content="text/html; charset=UTF-8" />
    <title>Bitwarden_rs</title>
    <title>Vaultwarden</title>
</head>
<body style="-webkit-font-smoothing: antialiased; -webkit-text-size-adjust: none; height: 100%; line-height: 25px; width: 100% !important;" bgcolor="#f6f6f6">
<style type="text/css">

@@ -113,7 +113,7 @@ Your Email Change
<td class="aligncenter social-icons" align="center" style="-webkit-font-smoothing: antialiased; -webkit-text-size-adjust: none; box-sizing: border-box; color: #999; font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; font-size: 12px; line-height: 20px; margin: 0; padding: 15px 0 0 0;" valign="top">
    <table cellpadding="0" cellspacing="0" style="-webkit-font-smoothing: antialiased; -webkit-text-size-adjust: none; box-sizing: border-box; color: #333; font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; font-size: 16px; line-height: 25px; margin: 0 auto;">
        <tr style="-webkit-font-smoothing: antialiased; -webkit-text-size-adjust: none; box-sizing: border-box; color: #333; font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; font-size: 16px; line-height: 25px; margin: 0;">
            <td style="-webkit-font-smoothing: antialiased; -webkit-text-size-adjust: none; box-sizing: border-box; color: #999; font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; font-size: 12px; line-height: 20px; margin: 0; padding: 0 10px;" valign="top"><a href="https://github.com/dani-garcia/bitwarden_rs" target="_blank" style="-webkit-font-smoothing: antialiased; -webkit-text-size-adjust: none; box-sizing: border-box; color: #999; font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; font-size: 12px; line-height: 20px; margin: 0; text-decoration: underline;"><img src="{{url}}/bwrs_static/mail-github.png" alt="GitHub" width="30" height="30" style="-webkit-font-smoothing: antialiased; -webkit-text-size-adjust: none; border: none; box-sizing: border-box; color: #333; font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; font-size: 16px; line-height: 25px; margin: 0; max-width: 100%;" /></a></td>
            <td style="-webkit-font-smoothing: antialiased; -webkit-text-size-adjust: none; box-sizing: border-box; color: #999; font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; font-size: 12px; line-height: 20px; margin: 0; padding: 0 10px;" valign="top"><a href="https://github.com/dani-garcia/vaultwarden" target="_blank" style="-webkit-font-smoothing: antialiased; -webkit-text-size-adjust: none; box-sizing: border-box; color: #999; font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; font-size: 12px; line-height: 20px; margin: 0; text-decoration: underline;"><img src="{{url}}/bwrs_static/mail-github.png" alt="GitHub" width="30" height="30" style="-webkit-font-smoothing: antialiased; -webkit-text-size-adjust: none; border: none; box-sizing: border-box; color: #333; font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; font-size: 16px; line-height: 25px; margin: 0; max-width: 100%;" /></a></td>
        </tr>
    </table>
</td>