mirror of
https://github.com/dani-garcia/vaultwarden.git
synced 2025-09-11 11:15:58 +03:00
Compare commits
58 Commits
Author | SHA1 | Date | |
---|---|---|---|
|
040e2a7bb0 | ||
|
d184c8f08c | ||
|
7d6dec6413 | ||
|
de01111082 | ||
|
0bd8f607cb | ||
|
21efc0800d | ||
|
1031c2e286 | ||
|
1bf85201e7 | ||
|
6ceed9284d | ||
|
25d99e3506 | ||
|
dca14285fd | ||
|
66baa5e7d8 | ||
|
248e561b3f | ||
|
55623ad9c6 | ||
|
e9acd8bd3c | ||
|
544b7229e8 | ||
|
978f009293 | ||
|
92f1530e96 | ||
|
2b824e8096 | ||
|
059661be48 | ||
|
0f3f97cc76 | ||
|
aa0fe7785a | ||
|
65d11a9720 | ||
|
c722006385 | ||
|
aaab7f9640 | ||
|
cbdb5657f1 | ||
|
669b9db758 | ||
|
3466a8040e | ||
|
7d47155d83 | ||
|
9e26014b4d | ||
|
339612c917 | ||
|
9eebbf3b9f | ||
|
b557c11724 | ||
|
a1204cc935 | ||
|
1ea511cbfc | ||
|
2e6a6fa39f | ||
|
e7d5c17ff7 | ||
|
a7be8fab9b | ||
|
39d4d31080 | ||
|
c28246cf34 | ||
|
d7df0ad79e | ||
|
7c8ba0c232 | ||
|
d335187172 | ||
|
f858523d92 | ||
|
529c39c6c5 | ||
|
b428481ac0 | ||
|
b4b2701905 | ||
|
de66e56b6c | ||
|
ecfebaf3c7 | ||
|
0e53f58288 | ||
|
bc7ceb2ee3 | ||
|
b27e6e30c9 | ||
|
505b30eec2 | ||
|
54bfcb8bc3 | ||
|
035f694d2f | ||
|
a4ab014ade | ||
|
6fedfceaa9 | ||
|
8e8483481f |
@@ -1,40 +1,15 @@
|
|||||||
# Local build artifacts
|
// Ignore everything
|
||||||
target
|
*
|
||||||
|
|
||||||
# Data folder
|
// Allow what is needed
|
||||||
data
|
!.git
|
||||||
|
|
||||||
# Misc
|
|
||||||
.env
|
|
||||||
.env.template
|
|
||||||
.gitattributes
|
|
||||||
.gitignore
|
|
||||||
rustfmt.toml
|
|
||||||
|
|
||||||
# IDE files
|
|
||||||
.vscode
|
|
||||||
.idea
|
|
||||||
.editorconfig
|
|
||||||
*.iml
|
|
||||||
|
|
||||||
# Documentation
|
|
||||||
.github
|
|
||||||
*.md
|
|
||||||
*.txt
|
|
||||||
*.yml
|
|
||||||
*.yaml
|
|
||||||
|
|
||||||
# Docker
|
|
||||||
hooks
|
|
||||||
tools
|
|
||||||
Dockerfile
|
|
||||||
.dockerignore
|
|
||||||
docker/**
|
|
||||||
!docker/healthcheck.sh
|
!docker/healthcheck.sh
|
||||||
!docker/start.sh
|
!docker/start.sh
|
||||||
|
!migrations
|
||||||
|
!src
|
||||||
|
|
||||||
# Web vault
|
!build.rs
|
||||||
web-vault
|
!Cargo.lock
|
||||||
|
!Cargo.toml
|
||||||
# Vaultwarden Resources
|
!rustfmt.toml
|
||||||
resources
|
!rust-toolchain.toml
|
||||||
|
@@ -92,15 +92,20 @@
|
|||||||
##########################
|
##########################
|
||||||
|
|
||||||
## Enables push notifications (requires key and id from https://bitwarden.com/host)
|
## Enables push notifications (requires key and id from https://bitwarden.com/host)
|
||||||
## If you choose "European Union" Data Region, uncomment PUSH_RELAY_URI and PUSH_IDENTITY_URI then replace .com by .eu
|
|
||||||
## Details about mobile client push notification:
|
## Details about mobile client push notification:
|
||||||
## - https://github.com/dani-garcia/vaultwarden/wiki/Enabling-Mobile-Client-push-notification
|
## - https://github.com/dani-garcia/vaultwarden/wiki/Enabling-Mobile-Client-push-notification
|
||||||
# PUSH_ENABLED=false
|
# PUSH_ENABLED=false
|
||||||
# PUSH_INSTALLATION_ID=CHANGEME
|
# PUSH_INSTALLATION_ID=CHANGEME
|
||||||
# PUSH_INSTALLATION_KEY=CHANGEME
|
# PUSH_INSTALLATION_KEY=CHANGEME
|
||||||
## Don't change this unless you know what you're doing.
|
|
||||||
|
# WARNING: Do not modify the following settings unless you fully understand their implications!
|
||||||
|
# Default Push Relay and Identity URIs
|
||||||
# PUSH_RELAY_URI=https://push.bitwarden.com
|
# PUSH_RELAY_URI=https://push.bitwarden.com
|
||||||
# PUSH_IDENTITY_URI=https://identity.bitwarden.com
|
# PUSH_IDENTITY_URI=https://identity.bitwarden.com
|
||||||
|
# European Union Data Region Settings
|
||||||
|
# If you have selected "European Union" as your data region, use the following URIs instead.
|
||||||
|
# PUSH_RELAY_URI=https://api.bitwarden.eu
|
||||||
|
# PUSH_IDENTITY_URI=https://identity.bitwarden.eu
|
||||||
|
|
||||||
#####################
|
#####################
|
||||||
### Schedule jobs ###
|
### Schedule jobs ###
|
||||||
@@ -152,6 +157,10 @@
|
|||||||
## Cron schedule of the job that cleans old auth requests from the auth request.
|
## Cron schedule of the job that cleans old auth requests from the auth request.
|
||||||
## Defaults to every minute. Set blank to disable this job.
|
## Defaults to every minute. Set blank to disable this job.
|
||||||
# AUTH_REQUEST_PURGE_SCHEDULE="30 * * * * *"
|
# AUTH_REQUEST_PURGE_SCHEDULE="30 * * * * *"
|
||||||
|
##
|
||||||
|
## Cron schedule of the job that cleans expired Duo contexts from the database. Does nothing if Duo MFA is disabled or set to use the legacy iframe prompt.
|
||||||
|
## Defaults to every minute. Set blank to disable this job.
|
||||||
|
# DUO_CONTEXT_PURGE_SCHEDULE="30 * * * * *"
|
||||||
|
|
||||||
########################
|
########################
|
||||||
### General settings ###
|
### General settings ###
|
||||||
@@ -320,15 +329,15 @@
|
|||||||
## The default is 10 seconds, but this could be to low on slower network connections
|
## The default is 10 seconds, but this could be to low on slower network connections
|
||||||
# ICON_DOWNLOAD_TIMEOUT=10
|
# ICON_DOWNLOAD_TIMEOUT=10
|
||||||
|
|
||||||
## Icon blacklist Regex
|
## Block HTTP domains/IPs by Regex
|
||||||
## Any domains or IPs that match this regex won't be fetched by the icon service.
|
## Any domains or IPs that match this regex won't be fetched by the internal HTTP client.
|
||||||
## Useful to hide other servers in the local network. Check the WIKI for more details
|
## Useful to hide other servers in the local network. Check the WIKI for more details
|
||||||
## NOTE: Always enclose this regex withing single quotes!
|
## NOTE: Always enclose this regex withing single quotes!
|
||||||
# ICON_BLACKLIST_REGEX='^(192\.168\.0\.[0-9]+|192\.168\.1\.[0-9]+)$'
|
# HTTP_REQUEST_BLOCK_REGEX='^(192\.168\.0\.[0-9]+|192\.168\.1\.[0-9]+)$'
|
||||||
|
|
||||||
## Any IP which is not defined as a global IP will be blacklisted.
|
## Enabling this will cause the internal HTTP client to refuse to connect to any non global IP address.
|
||||||
## Useful to secure your internal environment: See https://en.wikipedia.org/wiki/Reserved_IP_addresses for a list of IPs which it will block
|
## Useful to secure your internal environment: See https://en.wikipedia.org/wiki/Reserved_IP_addresses for a list of IPs which it will block
|
||||||
# ICON_BLACKLIST_NON_GLOBAL_IPS=true
|
# HTTP_REQUEST_BLOCK_NON_GLOBAL_IPS=true
|
||||||
|
|
||||||
## Client Settings
|
## Client Settings
|
||||||
## Enable experimental feature flags for clients.
|
## Enable experimental feature flags for clients.
|
||||||
@@ -362,8 +371,9 @@
|
|||||||
## Log level
|
## Log level
|
||||||
## Change the verbosity of the log output
|
## Change the verbosity of the log output
|
||||||
## Valid values are "trace", "debug", "info", "warn", "error" and "off"
|
## Valid values are "trace", "debug", "info", "warn", "error" and "off"
|
||||||
## Setting it to "trace" or "debug" would also show logs for mounted
|
## Setting it to "trace" or "debug" would also show logs for mounted routes and static file, websocket and alive requests
|
||||||
## routes and static file, websocket and alive requests
|
## For a specific module append a comma separated `path::to::module=log_level`
|
||||||
|
## For example, to only see debug logs for icons use: LOG_LEVEL="info,vaultwarden::api::icons=debug"
|
||||||
# LOG_LEVEL=info
|
# LOG_LEVEL=info
|
||||||
|
|
||||||
## Token for the admin interface, preferably an Argon2 PCH string
|
## Token for the admin interface, preferably an Argon2 PCH string
|
||||||
@@ -409,6 +419,18 @@
|
|||||||
## KNOW WHAT YOU ARE DOING!
|
## KNOW WHAT YOU ARE DOING!
|
||||||
# ORG_GROUPS_ENABLED=false
|
# ORG_GROUPS_ENABLED=false
|
||||||
|
|
||||||
|
## Increase secure note size limit (Know the risks!)
|
||||||
|
## Sets the secure note size limit to 100_000 instead of the default 10_000.
|
||||||
|
## WARNING: This could cause issues with clients. Also exports will not work on Bitwarden servers!
|
||||||
|
## KNOW WHAT YOU ARE DOING!
|
||||||
|
# INCREASE_NOTE_SIZE_LIMIT=false
|
||||||
|
|
||||||
|
## Enforce Single Org with Reset Password Policy
|
||||||
|
## Enforce that the Single Org policy is enabled before setting the Reset Password policy
|
||||||
|
## Bitwarden enforces this by default. In Vaultwarden we encouraged to use multiple organizations because groups were not available.
|
||||||
|
## Setting this to true will enforce the Single Org Policy to be enabled before you can enable the Reset Password policy.
|
||||||
|
# ENFORCE_SINGLE_ORG_WITH_RESET_PW_POLICY=false
|
||||||
|
|
||||||
########################
|
########################
|
||||||
### MFA/2FA settings ###
|
### MFA/2FA settings ###
|
||||||
########################
|
########################
|
||||||
@@ -422,15 +444,21 @@
|
|||||||
# YUBICO_SERVER=http://yourdomain.com/wsapi/2.0/verify
|
# YUBICO_SERVER=http://yourdomain.com/wsapi/2.0/verify
|
||||||
|
|
||||||
## Duo Settings
|
## Duo Settings
|
||||||
## You need to configure all options to enable global Duo support, otherwise users would need to configure it themselves
|
## You need to configure the DUO_IKEY, DUO_SKEY, and DUO_HOST options to enable global Duo support.
|
||||||
|
## Otherwise users will need to configure it themselves.
|
||||||
## Create an account and protect an application as mentioned in this link (only the first step, not the rest):
|
## Create an account and protect an application as mentioned in this link (only the first step, not the rest):
|
||||||
## https://help.bitwarden.com/article/setup-two-step-login-duo/#create-a-duo-security-account
|
## https://help.bitwarden.com/article/setup-two-step-login-duo/#create-a-duo-security-account
|
||||||
## Then set the following options, based on the values obtained from the last step:
|
## Then set the following options, based on the values obtained from the last step:
|
||||||
# DUO_IKEY=<Integration Key>
|
# DUO_IKEY=<Client ID>
|
||||||
# DUO_SKEY=<Secret Key>
|
# DUO_SKEY=<Client Secret>
|
||||||
# DUO_HOST=<API Hostname>
|
# DUO_HOST=<API Hostname>
|
||||||
## After that, you should be able to follow the rest of the guide linked above,
|
## After that, you should be able to follow the rest of the guide linked above,
|
||||||
## ignoring the fields that ask for the values that you already configured beforehand.
|
## ignoring the fields that ask for the values that you already configured beforehand.
|
||||||
|
##
|
||||||
|
## If you want to attempt to use Duo's 'Traditional Prompt' (deprecated, iframe based) set DUO_USE_IFRAME to 'true'.
|
||||||
|
## Duo no longer supports this, but it still works for some integrations.
|
||||||
|
## If you aren't sure, leave this alone.
|
||||||
|
# DUO_USE_IFRAME=false
|
||||||
|
|
||||||
## Email 2FA settings
|
## Email 2FA settings
|
||||||
## Email token size
|
## Email token size
|
||||||
|
66
.github/ISSUE_TEMPLATE/bug_report.md
vendored
66
.github/ISSUE_TEMPLATE/bug_report.md
vendored
@@ -1,66 +0,0 @@
|
|||||||
---
|
|
||||||
name: Bug report
|
|
||||||
about: Use this ONLY for bugs in vaultwarden itself. Use the Discourse forum (link below) to request features or get help with usage/configuration. If in doubt, use the forum.
|
|
||||||
title: ''
|
|
||||||
labels: ''
|
|
||||||
assignees: ''
|
|
||||||
|
|
||||||
---
|
|
||||||
<!--
|
|
||||||
# ###
|
|
||||||
NOTE: Please update to the latest version of vaultwarden before reporting an issue!
|
|
||||||
This saves you and us a lot of time and troubleshooting.
|
|
||||||
See:
|
|
||||||
* https://github.com/dani-garcia/vaultwarden/issues/1180
|
|
||||||
* https://github.com/dani-garcia/vaultwarden/wiki/Updating-the-vaultwarden-image
|
|
||||||
# ###
|
|
||||||
-->
|
|
||||||
|
|
||||||
<!--
|
|
||||||
Please fill out the following template to make solving your problem easier and faster for us.
|
|
||||||
This is only a guideline. If you think that parts are unnecessary for your issue, feel free to remove them.
|
|
||||||
|
|
||||||
Remember to hide/redact personal or confidential information,
|
|
||||||
such as passwords, IP addresses, and DNS names as appropriate.
|
|
||||||
-->
|
|
||||||
|
|
||||||
### Subject of the issue
|
|
||||||
<!-- Describe your issue here. -->
|
|
||||||
|
|
||||||
### Deployment environment
|
|
||||||
|
|
||||||
<!--
|
|
||||||
=========================================================================================
|
|
||||||
Preferably, use the `Generate Support String` button on the admin page's Diagnostics tab.
|
|
||||||
That will auto-generate most of the info requested in this section.
|
|
||||||
=========================================================================================
|
|
||||||
-->
|
|
||||||
|
|
||||||
<!-- The version number, obtained from the logs (at startup) or the admin diagnostics page -->
|
|
||||||
<!-- This is NOT the version number shown on the web vault, which is versioned separately from vaultwarden -->
|
|
||||||
<!-- Remember to check if your issue exists on the latest version first! -->
|
|
||||||
* vaultwarden version:
|
|
||||||
|
|
||||||
<!-- How the server was installed: Docker image, OS package, built from source, etc. -->
|
|
||||||
* Install method:
|
|
||||||
|
|
||||||
* Clients used: <!-- web vault, desktop, Android, iOS, etc. (if applicable) -->
|
|
||||||
|
|
||||||
* Reverse proxy and version: <!-- if applicable -->
|
|
||||||
|
|
||||||
* MySQL/MariaDB or PostgreSQL version: <!-- if applicable -->
|
|
||||||
|
|
||||||
* Other relevant details:
|
|
||||||
|
|
||||||
### Steps to reproduce
|
|
||||||
<!-- Tell us how to reproduce this issue. What parameters did you set (differently from the defaults)
|
|
||||||
and how did you start vaultwarden? -->
|
|
||||||
|
|
||||||
### Expected behaviour
|
|
||||||
<!-- Tell us what you expected to happen -->
|
|
||||||
|
|
||||||
### Actual behaviour
|
|
||||||
<!-- Tell us what actually happened -->
|
|
||||||
|
|
||||||
### Troubleshooting data
|
|
||||||
<!-- Share any log files, screenshots, or other relevant troubleshooting data -->
|
|
167
.github/ISSUE_TEMPLATE/bug_report.yml
vendored
Normal file
167
.github/ISSUE_TEMPLATE/bug_report.yml
vendored
Normal file
@@ -0,0 +1,167 @@
|
|||||||
|
name: Bug Report
|
||||||
|
description: File a bug report
|
||||||
|
labels: ["bug"]
|
||||||
|
body:
|
||||||
|
#
|
||||||
|
- type: markdown
|
||||||
|
attributes:
|
||||||
|
value: |
|
||||||
|
Thanks for taking the time to fill out this bug report!
|
||||||
|
|
||||||
|
Please *do not* submit feature requests or ask for help on how to configure Vaultwarden here.
|
||||||
|
|
||||||
|
The [GitHub Discussions](https://github.com/dani-garcia/vaultwarden/discussions/) has sections for Questions and Ideas.
|
||||||
|
|
||||||
|
Also, make sure you are running [](https://github.com/dani-garcia/vaultwarden/releases/latest) of Vaultwarden!
|
||||||
|
And search for existing open or closed issues or discussions regarding your topic before posting.
|
||||||
|
|
||||||
|
Be sure to check and validate the Vaultwarden Admin Diagnostics (`/admin/diagnostics`) page for any errors!
|
||||||
|
See here [how to enable the admin page](https://github.com/dani-garcia/vaultwarden/wiki/Enabling-admin-page).
|
||||||
|
#
|
||||||
|
- id: support-string
|
||||||
|
type: textarea
|
||||||
|
attributes:
|
||||||
|
label: Vaultwarden Support String
|
||||||
|
description: Output of the **Generate Support String** from the `/admin/diagnostics` page.
|
||||||
|
placeholder: |
|
||||||
|
1. Go to the Vaultwarden Admin of your instance https://example.domain.tld/admin/diagnostics
|
||||||
|
2. Click on `Generate Support String`
|
||||||
|
3. Click on `Copy To Clipboard`
|
||||||
|
4. Replace this text by pasting it into this textarea without any modifications
|
||||||
|
validations:
|
||||||
|
required: true
|
||||||
|
#
|
||||||
|
- id: version
|
||||||
|
type: input
|
||||||
|
attributes:
|
||||||
|
label: Vaultwarden Build Version
|
||||||
|
description: What version of Vaultwarden are you running?
|
||||||
|
placeholder: ex. v1.31.0 or v1.32.0-3466a804
|
||||||
|
validations:
|
||||||
|
required: true
|
||||||
|
#
|
||||||
|
- id: deployment
|
||||||
|
type: dropdown
|
||||||
|
attributes:
|
||||||
|
label: Deployment method
|
||||||
|
description: How did you deploy Vaultwarden?
|
||||||
|
multiple: false
|
||||||
|
options:
|
||||||
|
- Official Container Image
|
||||||
|
- Build from source
|
||||||
|
- OS Package (apt, yum/dnf, pacman, apk, nix, ...)
|
||||||
|
- Manually Extracted from Container Image
|
||||||
|
- Downloaded from GitHub Actions Release Workflow
|
||||||
|
- Other method
|
||||||
|
validations:
|
||||||
|
required: true
|
||||||
|
#
|
||||||
|
- id: deployment-other
|
||||||
|
type: textarea
|
||||||
|
attributes:
|
||||||
|
label: Custom deployment method
|
||||||
|
description: If you deployed Vaultwarden via any other method, please describe how.
|
||||||
|
#
|
||||||
|
- id: reverse-proxy
|
||||||
|
type: input
|
||||||
|
attributes:
|
||||||
|
label: Reverse Proxy
|
||||||
|
description: Are you using a reverse proxy, if so which and what version?
|
||||||
|
placeholder: ex. nginx 1.26.2, caddy 2.8.4, traefik 3.1.2, haproxy 3.0
|
||||||
|
validations:
|
||||||
|
required: true
|
||||||
|
#
|
||||||
|
- id: os
|
||||||
|
type: dropdown
|
||||||
|
attributes:
|
||||||
|
label: Host/Server Operating System
|
||||||
|
description: On what operating system are you running the Vaultwarden server?
|
||||||
|
multiple: false
|
||||||
|
options:
|
||||||
|
- Linux
|
||||||
|
- NAS/SAN
|
||||||
|
- Cloud
|
||||||
|
- Windows
|
||||||
|
- macOS
|
||||||
|
- Other
|
||||||
|
validations:
|
||||||
|
required: true
|
||||||
|
#
|
||||||
|
- id: os-version
|
||||||
|
type: input
|
||||||
|
attributes:
|
||||||
|
label: Operating System Version
|
||||||
|
description: What version of the operating system(s) are you seeing the problem on?
|
||||||
|
placeholder: ex. Arch Linux, Ubuntu 24.04, Kubernetes, Synology DSM 7.x, Windows 11
|
||||||
|
#
|
||||||
|
- id: clients
|
||||||
|
type: dropdown
|
||||||
|
attributes:
|
||||||
|
label: Clients
|
||||||
|
description: What client(s) are you seeing the problem on?
|
||||||
|
multiple: true
|
||||||
|
options:
|
||||||
|
- Web Vault
|
||||||
|
- Browser Extension
|
||||||
|
- CLI
|
||||||
|
- Desktop
|
||||||
|
- Android
|
||||||
|
- iOS
|
||||||
|
validations:
|
||||||
|
required: true
|
||||||
|
#
|
||||||
|
- id: client-version
|
||||||
|
type: input
|
||||||
|
attributes:
|
||||||
|
label: Client Version
|
||||||
|
description: What version(s) of the client(s) are you seeing the problem on?
|
||||||
|
placeholder: ex. CLI v2024.7.2, Firefox 130 - v2024.7.0
|
||||||
|
#
|
||||||
|
- id: reproduce
|
||||||
|
type: textarea
|
||||||
|
attributes:
|
||||||
|
label: Steps To Reproduce
|
||||||
|
description: How can we reproduce the behavior.
|
||||||
|
value: |
|
||||||
|
1. Go to '...'
|
||||||
|
2. Click on '....'
|
||||||
|
3. Scroll down to '....'
|
||||||
|
4. Click on '...'
|
||||||
|
5. Etc '...'
|
||||||
|
validations:
|
||||||
|
required: true
|
||||||
|
#
|
||||||
|
- id: expected
|
||||||
|
type: textarea
|
||||||
|
attributes:
|
||||||
|
label: Expected Result
|
||||||
|
description: A clear and concise description of what you expected to happen.
|
||||||
|
validations:
|
||||||
|
required: true
|
||||||
|
#
|
||||||
|
- id: actual
|
||||||
|
type: textarea
|
||||||
|
attributes:
|
||||||
|
label: Actual Result
|
||||||
|
description: A clear and concise description of what is happening.
|
||||||
|
validations:
|
||||||
|
required: true
|
||||||
|
#
|
||||||
|
- id: logs
|
||||||
|
type: textarea
|
||||||
|
attributes:
|
||||||
|
label: Logs
|
||||||
|
description: Provide the logs generated by Vaultwarden during the time this issue occurs.
|
||||||
|
render: text
|
||||||
|
#
|
||||||
|
- id: screenshots
|
||||||
|
type: textarea
|
||||||
|
attributes:
|
||||||
|
label: Screenshots or Videos
|
||||||
|
description: If applicable, add screenshots and/or a short video to help explain your problem.
|
||||||
|
#
|
||||||
|
- id: additional-context
|
||||||
|
type: textarea
|
||||||
|
attributes:
|
||||||
|
label: Additional Context
|
||||||
|
description: Add any other context about the problem here.
|
10
.github/ISSUE_TEMPLATE/config.yml
vendored
10
.github/ISSUE_TEMPLATE/config.yml
vendored
@@ -1,8 +1,8 @@
|
|||||||
blank_issues_enabled: false
|
blank_issues_enabled: false
|
||||||
contact_links:
|
contact_links:
|
||||||
- name: Discourse forum for vaultwarden
|
- name: GitHub Discussions for Vaultwarden
|
||||||
url: https://vaultwarden.discourse.group/
|
|
||||||
about: Use this forum to request features or get help with usage/configuration.
|
|
||||||
- name: GitHub Discussions for vaultwarden
|
|
||||||
url: https://github.com/dani-garcia/vaultwarden/discussions
|
url: https://github.com/dani-garcia/vaultwarden/discussions
|
||||||
about: An alternative to the Discourse forum, if this is easier for you.
|
about: Use the discussions to request features or get help with usage/configuration.
|
||||||
|
- name: Discourse forum for Vaultwarden
|
||||||
|
url: https://vaultwarden.discourse.group/
|
||||||
|
about: An alternative to the GitHub Discussions, if this is easier for you.
|
||||||
|
5
.github/workflows/build.yml
vendored
5
.github/workflows/build.yml
vendored
@@ -28,6 +28,7 @@ on:
|
|||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
build:
|
build:
|
||||||
|
# We use Ubuntu 22.04 here because this matches the library versions used within the Debian docker containers
|
||||||
runs-on: ubuntu-22.04
|
runs-on: ubuntu-22.04
|
||||||
timeout-minutes: 120
|
timeout-minutes: 120
|
||||||
# Make warnings errors, this is to prevent warnings slipping through.
|
# Make warnings errors, this is to prevent warnings slipping through.
|
||||||
@@ -74,7 +75,7 @@ jobs:
|
|||||||
|
|
||||||
# Only install the clippy and rustfmt components on the default rust-toolchain
|
# Only install the clippy and rustfmt components on the default rust-toolchain
|
||||||
- name: "Install rust-toolchain version"
|
- name: "Install rust-toolchain version"
|
||||||
uses: dtolnay/rust-toolchain@21dc36fb71dd22e3317045c0c31a3f4249868b17 # master @ Jun 13, 2024, 6:20 PM GMT+2
|
uses: dtolnay/rust-toolchain@7b1c307e0dcbda6122208f10795a713336a9b35a # master @ Aug 8, 2024, 7:36 PM GMT+2
|
||||||
if: ${{ matrix.channel == 'rust-toolchain' }}
|
if: ${{ matrix.channel == 'rust-toolchain' }}
|
||||||
with:
|
with:
|
||||||
toolchain: "${{steps.toolchain.outputs.RUST_TOOLCHAIN}}"
|
toolchain: "${{steps.toolchain.outputs.RUST_TOOLCHAIN}}"
|
||||||
@@ -84,7 +85,7 @@ jobs:
|
|||||||
|
|
||||||
# Install the any other channel to be used for which we do not execute clippy and rustfmt
|
# Install the any other channel to be used for which we do not execute clippy and rustfmt
|
||||||
- name: "Install MSRV version"
|
- name: "Install MSRV version"
|
||||||
uses: dtolnay/rust-toolchain@21dc36fb71dd22e3317045c0c31a3f4249868b17 # master @ Jun 13, 2024, 6:20 PM GMT+2
|
uses: dtolnay/rust-toolchain@7b1c307e0dcbda6122208f10795a713336a9b35a # master @ Aug 8, 2024, 7:36 PM GMT+2
|
||||||
if: ${{ matrix.channel != 'rust-toolchain' }}
|
if: ${{ matrix.channel != 'rust-toolchain' }}
|
||||||
with:
|
with:
|
||||||
toolchain: "${{steps.toolchain.outputs.RUST_TOOLCHAIN}}"
|
toolchain: "${{steps.toolchain.outputs.RUST_TOOLCHAIN}}"
|
||||||
|
28
.github/workflows/hadolint.yml
vendored
28
.github/workflows/hadolint.yml
vendored
@@ -8,7 +8,7 @@ on: [
|
|||||||
jobs:
|
jobs:
|
||||||
hadolint:
|
hadolint:
|
||||||
name: Validate Dockerfile syntax
|
name: Validate Dockerfile syntax
|
||||||
runs-on: ubuntu-22.04
|
runs-on: ubuntu-24.04
|
||||||
timeout-minutes: 30
|
timeout-minutes: 30
|
||||||
steps:
|
steps:
|
||||||
# Checkout the repo
|
# Checkout the repo
|
||||||
@@ -16,6 +16,18 @@ jobs:
|
|||||||
uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
|
uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
|
||||||
# End Checkout the repo
|
# End Checkout the repo
|
||||||
|
|
||||||
|
# Start Docker Buildx
|
||||||
|
- name: Setup Docker Buildx
|
||||||
|
uses: docker/setup-buildx-action@988b5a0280414f521da01fcc63a27aeeb4b104db # v3.6.1
|
||||||
|
# https://github.com/moby/buildkit/issues/3969
|
||||||
|
# Also set max parallelism to 2, the default of 4 breaks GitHub Actions and causes OOMKills
|
||||||
|
with:
|
||||||
|
buildkitd-config-inline: |
|
||||||
|
[worker.oci]
|
||||||
|
max-parallelism = 2
|
||||||
|
driver-opts: |
|
||||||
|
network=host
|
||||||
|
|
||||||
# Download hadolint - https://github.com/hadolint/hadolint/releases
|
# Download hadolint - https://github.com/hadolint/hadolint/releases
|
||||||
- name: Download hadolint
|
- name: Download hadolint
|
||||||
shell: bash
|
shell: bash
|
||||||
@@ -26,8 +38,18 @@ jobs:
|
|||||||
HADOLINT_VERSION: 2.12.0
|
HADOLINT_VERSION: 2.12.0
|
||||||
# End Download hadolint
|
# End Download hadolint
|
||||||
|
|
||||||
# Test Dockerfiles
|
# Test Dockerfiles with hadolint
|
||||||
- name: Run hadolint
|
- name: Run hadolint
|
||||||
shell: bash
|
shell: bash
|
||||||
run: hadolint docker/Dockerfile.{debian,alpine}
|
run: hadolint docker/Dockerfile.{debian,alpine}
|
||||||
# End Test Dockerfiles
|
# End Test Dockerfiles with hadolint
|
||||||
|
|
||||||
|
# Test Dockerfiles with docker build checks
|
||||||
|
- name: Run docker build check
|
||||||
|
shell: bash
|
||||||
|
run: |
|
||||||
|
echo "Checking docker/Dockerfile.debian"
|
||||||
|
docker build --check . -f docker/Dockerfile.debian
|
||||||
|
echo "Checking docker/Dockerfile.alpine"
|
||||||
|
docker build --check . -f docker/Dockerfile.alpine
|
||||||
|
# End Test Dockerfiles with docker build checks
|
||||||
|
28
.github/workflows/release.yml
vendored
28
.github/workflows/release.yml
vendored
@@ -13,7 +13,7 @@ jobs:
|
|||||||
# Some checks to determine if we need to continue with building a new docker.
|
# Some checks to determine if we need to continue with building a new docker.
|
||||||
# We will skip this check if we are creating a tag, because that has the same hash as a previous run already.
|
# We will skip this check if we are creating a tag, because that has the same hash as a previous run already.
|
||||||
skip_check:
|
skip_check:
|
||||||
runs-on: ubuntu-22.04
|
runs-on: ubuntu-24.04
|
||||||
if: ${{ github.repository == 'dani-garcia/vaultwarden' }}
|
if: ${{ github.repository == 'dani-garcia/vaultwarden' }}
|
||||||
outputs:
|
outputs:
|
||||||
should_skip: ${{ steps.skip_check.outputs.should_skip }}
|
should_skip: ${{ steps.skip_check.outputs.should_skip }}
|
||||||
@@ -27,7 +27,7 @@ jobs:
|
|||||||
if: ${{ github.ref_type == 'branch' }}
|
if: ${{ github.ref_type == 'branch' }}
|
||||||
|
|
||||||
docker-build:
|
docker-build:
|
||||||
runs-on: ubuntu-22.04
|
runs-on: ubuntu-24.04
|
||||||
timeout-minutes: 120
|
timeout-minutes: 120
|
||||||
needs: skip_check
|
needs: skip_check
|
||||||
if: ${{ needs.skip_check.outputs.should_skip != 'true' && github.repository == 'dani-garcia/vaultwarden' }}
|
if: ${{ needs.skip_check.outputs.should_skip != 'true' && github.repository == 'dani-garcia/vaultwarden' }}
|
||||||
@@ -63,19 +63,19 @@ jobs:
|
|||||||
fetch-depth: 0
|
fetch-depth: 0
|
||||||
|
|
||||||
- name: Initialize QEMU binfmt support
|
- name: Initialize QEMU binfmt support
|
||||||
uses: docker/setup-qemu-action@68827325e0b33c7199eb31dd4e31fbe9023e06e3 # v3.0.0
|
uses: docker/setup-qemu-action@49b3bc8e6bdd4a60e6116a5414239cba5943d3cf # v3.2.0
|
||||||
with:
|
with:
|
||||||
platforms: "arm64,arm"
|
platforms: "arm64,arm"
|
||||||
|
|
||||||
# Start Docker Buildx
|
# Start Docker Buildx
|
||||||
- name: Setup Docker Buildx
|
- name: Setup Docker Buildx
|
||||||
uses: docker/setup-buildx-action@d70bba72b1f3fd22344832f00baa16ece964efeb # v3.3.0
|
uses: docker/setup-buildx-action@988b5a0280414f521da01fcc63a27aeeb4b104db # v3.6.1
|
||||||
# https://github.com/moby/buildkit/issues/3969
|
# https://github.com/moby/buildkit/issues/3969
|
||||||
# Also set max parallelism to 3, the default of 4 breaks GitHub Actions and causes OOMKills
|
# Also set max parallelism to 2, the default of 4 breaks GitHub Actions and causes OOMKills
|
||||||
with:
|
with:
|
||||||
buildkitd-config-inline: |
|
buildkitd-config-inline: |
|
||||||
[worker.oci]
|
[worker.oci]
|
||||||
max-parallelism = 3
|
max-parallelism = 2
|
||||||
driver-opts: |
|
driver-opts: |
|
||||||
network=host
|
network=host
|
||||||
|
|
||||||
@@ -102,7 +102,7 @@ jobs:
|
|||||||
|
|
||||||
# Login to Docker Hub
|
# Login to Docker Hub
|
||||||
- name: Login to Docker Hub
|
- name: Login to Docker Hub
|
||||||
uses: docker/login-action@0d4c9c5ea7693da7b068278f7b52bda2a190a446 # v3.2.0
|
uses: docker/login-action@9780b0c442fbb1117ed29e0efdff1e18412f7567 # v3.3.0
|
||||||
with:
|
with:
|
||||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||||
password: ${{ secrets.DOCKERHUB_TOKEN }}
|
password: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||||
@@ -116,7 +116,7 @@ jobs:
|
|||||||
|
|
||||||
# Login to GitHub Container Registry
|
# Login to GitHub Container Registry
|
||||||
- name: Login to GitHub Container Registry
|
- name: Login to GitHub Container Registry
|
||||||
uses: docker/login-action@0d4c9c5ea7693da7b068278f7b52bda2a190a446 # v3.2.0
|
uses: docker/login-action@9780b0c442fbb1117ed29e0efdff1e18412f7567 # v3.3.0
|
||||||
with:
|
with:
|
||||||
registry: ghcr.io
|
registry: ghcr.io
|
||||||
username: ${{ github.repository_owner }}
|
username: ${{ github.repository_owner }}
|
||||||
@@ -131,7 +131,7 @@ jobs:
|
|||||||
|
|
||||||
# Login to Quay.io
|
# Login to Quay.io
|
||||||
- name: Login to Quay.io
|
- name: Login to Quay.io
|
||||||
uses: docker/login-action@0d4c9c5ea7693da7b068278f7b52bda2a190a446 # v3.2.0
|
uses: docker/login-action@9780b0c442fbb1117ed29e0efdff1e18412f7567 # v3.3.0
|
||||||
with:
|
with:
|
||||||
registry: quay.io
|
registry: quay.io
|
||||||
username: ${{ secrets.QUAY_USERNAME }}
|
username: ${{ secrets.QUAY_USERNAME }}
|
||||||
@@ -165,7 +165,7 @@ jobs:
|
|||||||
echo "CONTAINER_REGISTRIES=${CONTAINER_REGISTRIES:+${CONTAINER_REGISTRIES},}localhost:5000/vaultwarden/server" | tee -a "${GITHUB_ENV}"
|
echo "CONTAINER_REGISTRIES=${CONTAINER_REGISTRIES:+${CONTAINER_REGISTRIES},}localhost:5000/vaultwarden/server" | tee -a "${GITHUB_ENV}"
|
||||||
|
|
||||||
- name: Bake ${{ matrix.base_image }} containers
|
- name: Bake ${{ matrix.base_image }} containers
|
||||||
uses: docker/bake-action@1c5f18a523c4c68524cfbc5161494d8bb5b29d20 # v5.0.1
|
uses: docker/bake-action@76cc8060bdff6d632a465001e4cf300684c5472c # v5.7.0
|
||||||
env:
|
env:
|
||||||
BASE_TAGS: "${{ env.BASE_TAGS }}"
|
BASE_TAGS: "${{ env.BASE_TAGS }}"
|
||||||
SOURCE_COMMIT: "${{ env.SOURCE_COMMIT }}"
|
SOURCE_COMMIT: "${{ env.SOURCE_COMMIT }}"
|
||||||
@@ -223,28 +223,28 @@ jobs:
|
|||||||
|
|
||||||
# Upload artifacts to Github Actions
|
# Upload artifacts to Github Actions
|
||||||
- name: "Upload amd64 artifact"
|
- name: "Upload amd64 artifact"
|
||||||
uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3
|
uses: actions/upload-artifact@50769540e7f4bd5e21e526ee35c689e35e0d6874 # v4.4.0
|
||||||
if: ${{ matrix.base_image == 'alpine' }}
|
if: ${{ matrix.base_image == 'alpine' }}
|
||||||
with:
|
with:
|
||||||
name: vaultwarden-${{ env.SOURCE_VERSION }}-linux-amd64
|
name: vaultwarden-${{ env.SOURCE_VERSION }}-linux-amd64
|
||||||
path: vaultwarden-amd64
|
path: vaultwarden-amd64
|
||||||
|
|
||||||
- name: "Upload arm64 artifact"
|
- name: "Upload arm64 artifact"
|
||||||
uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3
|
uses: actions/upload-artifact@50769540e7f4bd5e21e526ee35c689e35e0d6874 # v4.4.0
|
||||||
if: ${{ matrix.base_image == 'alpine' }}
|
if: ${{ matrix.base_image == 'alpine' }}
|
||||||
with:
|
with:
|
||||||
name: vaultwarden-${{ env.SOURCE_VERSION }}-linux-arm64
|
name: vaultwarden-${{ env.SOURCE_VERSION }}-linux-arm64
|
||||||
path: vaultwarden-arm64
|
path: vaultwarden-arm64
|
||||||
|
|
||||||
- name: "Upload armv7 artifact"
|
- name: "Upload armv7 artifact"
|
||||||
uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3
|
uses: actions/upload-artifact@50769540e7f4bd5e21e526ee35c689e35e0d6874 # v4.4.0
|
||||||
if: ${{ matrix.base_image == 'alpine' }}
|
if: ${{ matrix.base_image == 'alpine' }}
|
||||||
with:
|
with:
|
||||||
name: vaultwarden-${{ env.SOURCE_VERSION }}-linux-armv7
|
name: vaultwarden-${{ env.SOURCE_VERSION }}-linux-armv7
|
||||||
path: vaultwarden-armv7
|
path: vaultwarden-armv7
|
||||||
|
|
||||||
- name: "Upload armv6 artifact"
|
- name: "Upload armv6 artifact"
|
||||||
uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3
|
uses: actions/upload-artifact@50769540e7f4bd5e21e526ee35c689e35e0d6874 # v4.4.0
|
||||||
if: ${{ matrix.base_image == 'alpine' }}
|
if: ${{ matrix.base_image == 'alpine' }}
|
||||||
with:
|
with:
|
||||||
name: vaultwarden-${{ env.SOURCE_VERSION }}-linux-armv6
|
name: vaultwarden-${{ env.SOURCE_VERSION }}-linux-armv6
|
||||||
|
2
.github/workflows/releasecache-cleanup.yml
vendored
2
.github/workflows/releasecache-cleanup.yml
vendored
@@ -13,7 +13,7 @@ name: Cleanup
|
|||||||
jobs:
|
jobs:
|
||||||
releasecache-cleanup:
|
releasecache-cleanup:
|
||||||
name: Releasecache Cleanup
|
name: Releasecache Cleanup
|
||||||
runs-on: ubuntu-22.04
|
runs-on: ubuntu-24.04
|
||||||
continue-on-error: true
|
continue-on-error: true
|
||||||
timeout-minutes: 30
|
timeout-minutes: 30
|
||||||
steps:
|
steps:
|
||||||
|
4
.github/workflows/trivy.yml
vendored
4
.github/workflows/trivy.yml
vendored
@@ -17,7 +17,7 @@ permissions:
|
|||||||
jobs:
|
jobs:
|
||||||
trivy-scan:
|
trivy-scan:
|
||||||
name: Check
|
name: Check
|
||||||
runs-on: ubuntu-22.04
|
runs-on: ubuntu-24.04
|
||||||
timeout-minutes: 30
|
timeout-minutes: 30
|
||||||
permissions:
|
permissions:
|
||||||
contents: read
|
contents: read
|
||||||
@@ -28,7 +28,7 @@ jobs:
|
|||||||
uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 #v4.1.7
|
uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 #v4.1.7
|
||||||
|
|
||||||
- name: Run Trivy vulnerability scanner
|
- name: Run Trivy vulnerability scanner
|
||||||
uses: aquasecurity/trivy-action@7c2007bcb556501da015201bcba5aa14069b74e2 # v0.23.0
|
uses: aquasecurity/trivy-action@6e7b7d1fd3e4fef0c5fa8cce1229c54b2c9bd0d8 # v0.24.0
|
||||||
with:
|
with:
|
||||||
scan-type: repo
|
scan-type: repo
|
||||||
ignore-unfixed: true
|
ignore-unfixed: true
|
||||||
|
@@ -1,7 +1,7 @@
|
|||||||
---
|
---
|
||||||
repos:
|
repos:
|
||||||
- repo: https://github.com/pre-commit/pre-commit-hooks
|
- repo: https://github.com/pre-commit/pre-commit-hooks
|
||||||
rev: v4.5.0
|
rev: v4.6.0
|
||||||
hooks:
|
hooks:
|
||||||
- id: check-yaml
|
- id: check-yaml
|
||||||
- id: check-json
|
- id: check-json
|
||||||
|
1141
Cargo.lock
generated
1141
Cargo.lock
generated
File diff suppressed because it is too large
Load Diff
70
Cargo.toml
70
Cargo.toml
@@ -3,7 +3,7 @@ name = "vaultwarden"
|
|||||||
version = "1.0.0"
|
version = "1.0.0"
|
||||||
authors = ["Daniel García <dani-garcia@users.noreply.github.com>"]
|
authors = ["Daniel García <dani-garcia@users.noreply.github.com>"]
|
||||||
edition = "2021"
|
edition = "2021"
|
||||||
rust-version = "1.78.0"
|
rust-version = "1.79.0"
|
||||||
resolver = "2"
|
resolver = "2"
|
||||||
|
|
||||||
repository = "https://github.com/dani-garcia/vaultwarden"
|
repository = "https://github.com/dani-garcia/vaultwarden"
|
||||||
@@ -18,17 +18,17 @@ build = "build.rs"
|
|||||||
enable_syslog = []
|
enable_syslog = []
|
||||||
mysql = ["diesel/mysql", "diesel_migrations/mysql"]
|
mysql = ["diesel/mysql", "diesel_migrations/mysql"]
|
||||||
postgresql = ["diesel/postgres", "diesel_migrations/postgres"]
|
postgresql = ["diesel/postgres", "diesel_migrations/postgres"]
|
||||||
sqlite = ["diesel/sqlite", "diesel_migrations/sqlite", "libsqlite3-sys"]
|
sqlite = ["diesel/sqlite", "diesel_migrations/sqlite", "dep:libsqlite3-sys"]
|
||||||
# Enable to use a vendored and statically linked openssl
|
# Enable to use a vendored and statically linked openssl
|
||||||
vendored_openssl = ["openssl/vendored"]
|
vendored_openssl = ["openssl/vendored"]
|
||||||
# Enable MiMalloc memory allocator to replace the default malloc
|
# Enable MiMalloc memory allocator to replace the default malloc
|
||||||
# This can improve performance for Alpine builds
|
# This can improve performance for Alpine builds
|
||||||
enable_mimalloc = ["mimalloc"]
|
enable_mimalloc = ["dep:mimalloc"]
|
||||||
# This is a development dependency, and should only be used during development!
|
# This is a development dependency, and should only be used during development!
|
||||||
# It enables the usage of the diesel_logger crate, which is able to output the generated queries.
|
# It enables the usage of the diesel_logger crate, which is able to output the generated queries.
|
||||||
# You also need to set an env variable `QUERY_LOGGER=1` to fully activate this so you do not have to re-compile
|
# You also need to set an env variable `QUERY_LOGGER=1` to fully activate this so you do not have to re-compile
|
||||||
# if you want to turn off the logging for a specific run.
|
# if you want to turn off the logging for a specific run.
|
||||||
query_logger = ["diesel_logger"]
|
query_logger = ["dep:diesel_logger"]
|
||||||
|
|
||||||
# Enable unstable features, requires nightly
|
# Enable unstable features, requires nightly
|
||||||
# Currently only used to enable rusts official ip support
|
# Currently only used to enable rusts official ip support
|
||||||
@@ -63,34 +63,34 @@ rocket_ws = { version ="0.1.1" }
|
|||||||
rmpv = "1.3.0" # MessagePack library
|
rmpv = "1.3.0" # MessagePack library
|
||||||
|
|
||||||
# Concurrent HashMap used for WebSocket messaging and favicons
|
# Concurrent HashMap used for WebSocket messaging and favicons
|
||||||
dashmap = "6.0.1"
|
dashmap = "6.1.0"
|
||||||
|
|
||||||
# Async futures
|
# Async futures
|
||||||
futures = "0.3.30"
|
futures = "0.3.30"
|
||||||
tokio = { version = "1.38.0", features = ["rt-multi-thread", "fs", "io-util", "parking_lot", "time", "signal", "net"] }
|
tokio = { version = "1.40.0", features = ["rt-multi-thread", "fs", "io-util", "parking_lot", "time", "signal", "net"] }
|
||||||
|
|
||||||
# A generic serialization/deserialization framework
|
# A generic serialization/deserialization framework
|
||||||
serde = { version = "1.0.204", features = ["derive"] }
|
serde = { version = "1.0.210", features = ["derive"] }
|
||||||
serde_json = "1.0.120"
|
serde_json = "1.0.128"
|
||||||
|
|
||||||
# A safe, extensible ORM and Query builder
|
# A safe, extensible ORM and Query builder
|
||||||
diesel = { version = "2.2.1", features = ["chrono", "r2d2", "numeric"] }
|
diesel = { version = "2.2.4", features = ["chrono", "r2d2", "numeric"] }
|
||||||
diesel_migrations = "2.2.0"
|
diesel_migrations = "2.2.0"
|
||||||
diesel_logger = { version = "0.3.0", optional = true }
|
diesel_logger = { version = "0.3.0", optional = true }
|
||||||
|
|
||||||
# Bundled/Static SQLite
|
# Bundled/Static SQLite
|
||||||
libsqlite3-sys = { version = "0.28.0", features = ["bundled"], optional = true }
|
libsqlite3-sys = { version = "0.30.1", features = ["bundled"], optional = true }
|
||||||
|
|
||||||
# Crypto-related libraries
|
# Crypto-related libraries
|
||||||
rand = { version = "0.8.5", features = ["small_rng"] }
|
rand = { version = "0.8.5", features = ["small_rng"] }
|
||||||
ring = "0.17.8"
|
ring = "0.17.8"
|
||||||
|
|
||||||
# UUID generation
|
# UUID generation
|
||||||
uuid = { version = "1.9.1", features = ["v4"] }
|
uuid = { version = "1.10.0", features = ["v4"] }
|
||||||
|
|
||||||
# Date and time libraries
|
# Date and time libraries
|
||||||
chrono = { version = "0.4.38", features = ["clock", "serde"], default-features = false }
|
chrono = { version = "0.4.38", features = ["clock", "serde"], default-features = false }
|
||||||
chrono-tz = "0.9.0"
|
chrono-tz = "0.10.0"
|
||||||
time = "0.3.36"
|
time = "0.3.36"
|
||||||
|
|
||||||
# Job scheduler
|
# Job scheduler
|
||||||
@@ -115,32 +115,32 @@ webauthn-rs = "0.3.2"
|
|||||||
url = "2.5.2"
|
url = "2.5.2"
|
||||||
|
|
||||||
# Email libraries
|
# Email libraries
|
||||||
lettre = { version = "0.11.7", features = ["smtp-transport", "sendmail-transport", "builder", "serde", "tokio1-native-tls", "hostname", "tracing", "tokio1"], default-features = false }
|
lettre = { version = "0.11.9", features = ["smtp-transport", "sendmail-transport", "builder", "serde", "tokio1-native-tls", "hostname", "tracing", "tokio1"], default-features = false }
|
||||||
percent-encoding = "2.3.1" # URL encoding library used for URL's in the emails
|
percent-encoding = "2.3.1" # URL encoding library used for URL's in the emails
|
||||||
email_address = "0.2.5"
|
email_address = "0.2.9"
|
||||||
|
|
||||||
# HTML Template library
|
# HTML Template library
|
||||||
handlebars = { version = "5.1.2", features = ["dir_source"] }
|
handlebars = { version = "6.1.0", features = ["dir_source"] }
|
||||||
|
|
||||||
# HTTP client (Used for favicons, version check, DUO and HIBP API)
|
# HTTP client (Used for favicons, version check, DUO and HIBP API)
|
||||||
reqwest = { version = "0.12.5", features = ["native-tls-alpn", "stream", "json", "gzip", "brotli", "socks", "cookies"] }
|
reqwest = { version = "0.12.7", features = ["native-tls-alpn", "stream", "json", "gzip", "brotli", "socks", "cookies"] }
|
||||||
hickory-resolver = "0.24.1"
|
hickory-resolver = "0.24.1"
|
||||||
|
|
||||||
# Favicon extraction libraries
|
# Favicon extraction libraries
|
||||||
html5gum = "0.5.7"
|
html5gum = "0.5.7"
|
||||||
regex = { version = "1.10.5", features = ["std", "perf", "unicode-perl"], default-features = false }
|
regex = { version = "1.10.6", features = ["std", "perf", "unicode-perl"], default-features = false }
|
||||||
data-url = "0.3.1"
|
data-url = "0.3.1"
|
||||||
bytes = "1.6.0"
|
bytes = "1.7.2"
|
||||||
|
|
||||||
# Cache function results (Used for version check and favicon fetching)
|
# Cache function results (Used for version check and favicon fetching)
|
||||||
cached = { version = "0.52.0", features = ["async"] }
|
cached = { version = "0.53.1", features = ["async"] }
|
||||||
|
|
||||||
# Used for custom short lived cookie jar during favicon extraction
|
# Used for custom short lived cookie jar during favicon extraction
|
||||||
cookie = "0.18.1"
|
cookie = "0.18.1"
|
||||||
cookie_store = "0.21.0"
|
cookie_store = "0.21.0"
|
||||||
|
|
||||||
# Used by U2F, JWT and PostgreSQL
|
# Used by U2F, JWT and PostgreSQL
|
||||||
openssl = "0.10.64"
|
openssl = "0.10.66"
|
||||||
|
|
||||||
# CLI argument parsing
|
# CLI argument parsing
|
||||||
pico-args = "0.5.0"
|
pico-args = "0.5.0"
|
||||||
@@ -155,7 +155,7 @@ semver = "1.0.23"
|
|||||||
# Allow overriding the default memory allocator
|
# Allow overriding the default memory allocator
|
||||||
# Mainly used for the musl builds, since the default musl malloc is very slow
|
# Mainly used for the musl builds, since the default musl malloc is very slow
|
||||||
mimalloc = { version = "0.1.43", features = ["secure"], default-features = false, optional = true }
|
mimalloc = { version = "0.1.43", features = ["secure"], default-features = false, optional = true }
|
||||||
which = "6.0.1"
|
which = "6.0.3"
|
||||||
|
|
||||||
# Argon2 library with support for the PHC format
|
# Argon2 library with support for the PHC format
|
||||||
argon2 = "0.5.3"
|
argon2 = "0.5.3"
|
||||||
@@ -198,33 +198,46 @@ lto = "thin"
|
|||||||
codegen-units = 16
|
codegen-units = 16
|
||||||
|
|
||||||
# Linting config
|
# Linting config
|
||||||
|
# https://doc.rust-lang.org/rustc/lints/groups.html
|
||||||
[lints.rust]
|
[lints.rust]
|
||||||
# Forbid
|
# Forbid
|
||||||
unsafe_code = "forbid"
|
unsafe_code = "forbid"
|
||||||
non_ascii_idents = "forbid"
|
non_ascii_idents = "forbid"
|
||||||
|
|
||||||
# Deny
|
# Deny
|
||||||
|
deprecated_in_future = "deny"
|
||||||
future_incompatible = { level = "deny", priority = -1 }
|
future_incompatible = { level = "deny", priority = -1 }
|
||||||
|
keyword_idents = { level = "deny", priority = -1 }
|
||||||
|
let_underscore = { level = "deny", priority = -1 }
|
||||||
noop_method_call = "deny"
|
noop_method_call = "deny"
|
||||||
|
refining_impl_trait = { level = "deny", priority = -1 }
|
||||||
rust_2018_idioms = { level = "deny", priority = -1 }
|
rust_2018_idioms = { level = "deny", priority = -1 }
|
||||||
rust_2021_compatibility = { level = "deny", priority = -1 }
|
rust_2021_compatibility = { level = "deny", priority = -1 }
|
||||||
|
# rust_2024_compatibility = { level = "deny", priority = -1 } # Enable once we are at MSRV 1.81.0
|
||||||
|
single_use_lifetimes = "deny"
|
||||||
trivial_casts = "deny"
|
trivial_casts = "deny"
|
||||||
trivial_numeric_casts = "deny"
|
trivial_numeric_casts = "deny"
|
||||||
unused = { level = "deny", priority = -1 }
|
unused = { level = "deny", priority = -1 }
|
||||||
unused_import_braces = "deny"
|
unused_import_braces = "deny"
|
||||||
unused_lifetimes = "deny"
|
unused_lifetimes = "deny"
|
||||||
deprecated_in_future = "deny"
|
unused_qualifications = "deny"
|
||||||
|
variant_size_differences = "deny"
|
||||||
|
# The lints below are part of the rust_2024_compatibility group
|
||||||
|
static-mut-refs = "deny"
|
||||||
|
unsafe-op-in-unsafe-fn = "deny"
|
||||||
|
|
||||||
|
# https://rust-lang.github.io/rust-clippy/stable/index.html
|
||||||
[lints.clippy]
|
[lints.clippy]
|
||||||
# Allow
|
# Warn
|
||||||
# We need this since Rust v1.76+, since it has some bugs
|
dbg_macro = "warn"
|
||||||
# https://github.com/rust-lang/rust-clippy/issues/12016
|
todo = "warn"
|
||||||
blocks_in_conditions = "allow"
|
|
||||||
|
|
||||||
# Deny
|
# Deny
|
||||||
|
case_sensitive_file_extension_comparisons = "deny"
|
||||||
cast_lossless = "deny"
|
cast_lossless = "deny"
|
||||||
clone_on_ref_ptr = "deny"
|
clone_on_ref_ptr = "deny"
|
||||||
equatable_if_let = "deny"
|
equatable_if_let = "deny"
|
||||||
|
filter_map_next = "deny"
|
||||||
float_cmp_const = "deny"
|
float_cmp_const = "deny"
|
||||||
inefficient_to_string = "deny"
|
inefficient_to_string = "deny"
|
||||||
iter_on_empty_collections = "deny"
|
iter_on_empty_collections = "deny"
|
||||||
@@ -234,13 +247,18 @@ macro_use_imports = "deny"
|
|||||||
manual_assert = "deny"
|
manual_assert = "deny"
|
||||||
manual_instant_elapsed = "deny"
|
manual_instant_elapsed = "deny"
|
||||||
manual_string_new = "deny"
|
manual_string_new = "deny"
|
||||||
|
match_on_vec_items = "deny"
|
||||||
match_wildcard_for_single_variants = "deny"
|
match_wildcard_for_single_variants = "deny"
|
||||||
mem_forget = "deny"
|
mem_forget = "deny"
|
||||||
|
needless_continue = "deny"
|
||||||
needless_lifetimes = "deny"
|
needless_lifetimes = "deny"
|
||||||
|
option_option = "deny"
|
||||||
string_add_assign = "deny"
|
string_add_assign = "deny"
|
||||||
string_to_string = "deny"
|
string_to_string = "deny"
|
||||||
unnecessary_join = "deny"
|
unnecessary_join = "deny"
|
||||||
unnecessary_self_imports = "deny"
|
unnecessary_self_imports = "deny"
|
||||||
|
unnested_or_patterns = "deny"
|
||||||
unused_async = "deny"
|
unused_async = "deny"
|
||||||
|
unused_self = "deny"
|
||||||
verbose_file_reads = "deny"
|
verbose_file_reads = "deny"
|
||||||
zero_sized_map_values = "deny"
|
zero_sized_map_values = "deny"
|
||||||
|
12
SECURITY.md
12
SECURITY.md
@@ -39,7 +39,11 @@ Thank you for helping keep Vaultwarden and our users safe!
|
|||||||
|
|
||||||
# How to contact us
|
# How to contact us
|
||||||
|
|
||||||
- You can contact us on Matrix https://matrix.to/#/#vaultwarden:matrix.org (user: `@danig:matrix.org`)
|
- You can contact us on Matrix https://matrix.to/#/#vaultwarden:matrix.org (users: `@danig:matrix.org` and/or `@blackdex:matrix.org`)
|
||||||
- You can send an  to report a security issue.
|
- You can send an  to report a security issue.<br>
|
||||||
- If you want to send an encrypted email you can use the following GPG key:<br>
|
If you want to send an encrypted email you can use the following GPG key: 13BB3A34C9E380258CE43D595CB150B31F6426BC<br>
|
||||||
https://keyserver.ubuntu.com/pks/lookup?search=0xB9B7A108373276BF3C0406F9FC8A7D14C3CD543A&fingerprint=on&op=index
|
It can be found on several public GPG key servers.<br>
|
||||||
|
* https://keys.openpgp.org/search?q=security%40vaultwarden.org
|
||||||
|
* https://keys.mailvelope.com/pks/lookup?op=get&search=security%40vaultwarden.org
|
||||||
|
* https://pgpkeys.eu/pks/lookup?search=security%40vaultwarden.org&fingerprint=on&op=index
|
||||||
|
* https://keyserver.ubuntu.com/pks/lookup?search=security%40vaultwarden.org&fingerprint=on&op=index
|
||||||
|
@@ -1,10 +1,11 @@
|
|||||||
---
|
---
|
||||||
vault_version: "v2024.5.1b"
|
vault_version: "v2024.6.2c"
|
||||||
vault_image_digest: "sha256:1a867b4b175e85fc8602314bd83bc263c76c49787031704f16a2915567725375"
|
vault_image_digest: "sha256:409ab328ca931439cb916b388a4bb784bd44220717aaf74cf71620c23e34fc2b"
|
||||||
# Cross Compile Docker Helper Scripts v1.4.0
|
# Cross Compile Docker Helper Scripts v1.5.0
|
||||||
# We use the linux/amd64 platform shell scripts since there is no difference between the different platform scripts
|
# We use the linux/amd64 platform shell scripts since there is no difference between the different platform scripts
|
||||||
xx_image_digest: "sha256:0cd3f05c72d6c9b038eb135f91376ee1169ef3a330d34e418e65e2a5c2e9c0d4"
|
# https://github.com/tonistiigi/xx | https://hub.docker.com/r/tonistiigi/xx/tags
|
||||||
rust_version: 1.79.0 # Rust version to be used
|
xx_image_digest: "sha256:1978e7a58a1777cb0ef0dde76bad60b7914b21da57cfa88047875e4f364297aa"
|
||||||
|
rust_version: 1.81.0 # Rust version to be used
|
||||||
debian_version: bookworm # Debian release name to be used
|
debian_version: bookworm # Debian release name to be used
|
||||||
alpine_version: "3.20" # Alpine version to be used
|
alpine_version: "3.20" # Alpine version to be used
|
||||||
# For which platforms/architectures will we try to build images
|
# For which platforms/architectures will we try to build images
|
||||||
|
@@ -1,4 +1,5 @@
|
|||||||
# syntax=docker/dockerfile:1
|
# syntax=docker/dockerfile:1
|
||||||
|
# check=skip=FromPlatformFlagConstDisallowed,RedundantTargetPlatform
|
||||||
|
|
||||||
# This file was generated using a Jinja2 template.
|
# This file was generated using a Jinja2 template.
|
||||||
# Please make your changes in `DockerSettings.yaml` or `Dockerfile.j2` and then `make`
|
# Please make your changes in `DockerSettings.yaml` or `Dockerfile.j2` and then `make`
|
||||||
@@ -18,27 +19,27 @@
|
|||||||
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
|
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
|
||||||
# click the tag name to view the digest of the image it currently points to.
|
# click the tag name to view the digest of the image it currently points to.
|
||||||
# - From the command line:
|
# - From the command line:
|
||||||
# $ docker pull docker.io/vaultwarden/web-vault:v2024.5.1b
|
# $ docker pull docker.io/vaultwarden/web-vault:v2024.6.2c
|
||||||
# $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2024.5.1b
|
# $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2024.6.2c
|
||||||
# [docker.io/vaultwarden/web-vault@sha256:1a867b4b175e85fc8602314bd83bc263c76c49787031704f16a2915567725375]
|
# [docker.io/vaultwarden/web-vault@sha256:409ab328ca931439cb916b388a4bb784bd44220717aaf74cf71620c23e34fc2b]
|
||||||
#
|
#
|
||||||
# - Conversely, to get the tag name from the digest:
|
# - Conversely, to get the tag name from the digest:
|
||||||
# $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:1a867b4b175e85fc8602314bd83bc263c76c49787031704f16a2915567725375
|
# $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:409ab328ca931439cb916b388a4bb784bd44220717aaf74cf71620c23e34fc2b
|
||||||
# [docker.io/vaultwarden/web-vault:v2024.5.1b]
|
# [docker.io/vaultwarden/web-vault:v2024.6.2c]
|
||||||
#
|
#
|
||||||
FROM --platform=linux/amd64 docker.io/vaultwarden/web-vault@sha256:1a867b4b175e85fc8602314bd83bc263c76c49787031704f16a2915567725375 as vault
|
FROM --platform=linux/amd64 docker.io/vaultwarden/web-vault@sha256:409ab328ca931439cb916b388a4bb784bd44220717aaf74cf71620c23e34fc2b AS vault
|
||||||
|
|
||||||
########################## ALPINE BUILD IMAGES ##########################
|
########################## ALPINE BUILD IMAGES ##########################
|
||||||
## NOTE: The Alpine Base Images do not support other platforms then linux/amd64
|
## NOTE: The Alpine Base Images do not support other platforms then linux/amd64
|
||||||
## And for Alpine we define all build images here, they will only be loaded when actually used
|
## And for Alpine we define all build images here, they will only be loaded when actually used
|
||||||
FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:x86_64-musl-stable-1.79.0 as build_amd64
|
FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:x86_64-musl-stable-1.81.0 AS build_amd64
|
||||||
FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:aarch64-musl-stable-1.79.0 as build_arm64
|
FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:aarch64-musl-stable-1.81.0 AS build_arm64
|
||||||
FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:armv7-musleabihf-stable-1.79.0 as build_armv7
|
FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:armv7-musleabihf-stable-1.81.0 AS build_armv7
|
||||||
FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:arm-musleabi-stable-1.79.0 as build_armv6
|
FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:arm-musleabi-stable-1.81.0 AS build_armv6
|
||||||
|
|
||||||
########################## BUILD IMAGE ##########################
|
########################## BUILD IMAGE ##########################
|
||||||
# hadolint ignore=DL3006
|
# hadolint ignore=DL3006
|
||||||
FROM --platform=linux/amd64 build_${TARGETARCH}${TARGETVARIANT} as build
|
FROM --platform=linux/amd64 build_${TARGETARCH}${TARGETVARIANT} AS build
|
||||||
ARG TARGETARCH
|
ARG TARGETARCH
|
||||||
ARG TARGETVARIANT
|
ARG TARGETVARIANT
|
||||||
ARG TARGETPLATFORM
|
ARG TARGETPLATFORM
|
||||||
@@ -142,7 +143,6 @@ RUN mkdir /data && \
|
|||||||
|
|
||||||
VOLUME /data
|
VOLUME /data
|
||||||
EXPOSE 80
|
EXPOSE 80
|
||||||
EXPOSE 3012
|
|
||||||
|
|
||||||
# Copies the files from the context (Rocket.toml file and web-vault)
|
# Copies the files from the context (Rocket.toml file and web-vault)
|
||||||
# and the binary from the "build" stage to the current stage
|
# and the binary from the "build" stage to the current stage
|
||||||
|
@@ -1,4 +1,5 @@
|
|||||||
# syntax=docker/dockerfile:1
|
# syntax=docker/dockerfile:1
|
||||||
|
# check=skip=FromPlatformFlagConstDisallowed,RedundantTargetPlatform
|
||||||
|
|
||||||
# This file was generated using a Jinja2 template.
|
# This file was generated using a Jinja2 template.
|
||||||
# Please make your changes in `DockerSettings.yaml` or `Dockerfile.j2` and then `make`
|
# Please make your changes in `DockerSettings.yaml` or `Dockerfile.j2` and then `make`
|
||||||
@@ -18,24 +19,24 @@
|
|||||||
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
|
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
|
||||||
# click the tag name to view the digest of the image it currently points to.
|
# click the tag name to view the digest of the image it currently points to.
|
||||||
# - From the command line:
|
# - From the command line:
|
||||||
# $ docker pull docker.io/vaultwarden/web-vault:v2024.5.1b
|
# $ docker pull docker.io/vaultwarden/web-vault:v2024.6.2c
|
||||||
# $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2024.5.1b
|
# $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2024.6.2c
|
||||||
# [docker.io/vaultwarden/web-vault@sha256:1a867b4b175e85fc8602314bd83bc263c76c49787031704f16a2915567725375]
|
# [docker.io/vaultwarden/web-vault@sha256:409ab328ca931439cb916b388a4bb784bd44220717aaf74cf71620c23e34fc2b]
|
||||||
#
|
#
|
||||||
# - Conversely, to get the tag name from the digest:
|
# - Conversely, to get the tag name from the digest:
|
||||||
# $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:1a867b4b175e85fc8602314bd83bc263c76c49787031704f16a2915567725375
|
# $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:409ab328ca931439cb916b388a4bb784bd44220717aaf74cf71620c23e34fc2b
|
||||||
# [docker.io/vaultwarden/web-vault:v2024.5.1b]
|
# [docker.io/vaultwarden/web-vault:v2024.6.2c]
|
||||||
#
|
#
|
||||||
FROM --platform=linux/amd64 docker.io/vaultwarden/web-vault@sha256:1a867b4b175e85fc8602314bd83bc263c76c49787031704f16a2915567725375 as vault
|
FROM --platform=linux/amd64 docker.io/vaultwarden/web-vault@sha256:409ab328ca931439cb916b388a4bb784bd44220717aaf74cf71620c23e34fc2b AS vault
|
||||||
|
|
||||||
########################## Cross Compile Docker Helper Scripts ##########################
|
########################## Cross Compile Docker Helper Scripts ##########################
|
||||||
## We use the linux/amd64 no matter which Build Platform, since these are all bash scripts
|
## We use the linux/amd64 no matter which Build Platform, since these are all bash scripts
|
||||||
## And these bash scripts do not have any significant difference if at all
|
## And these bash scripts do not have any significant difference if at all
|
||||||
FROM --platform=linux/amd64 docker.io/tonistiigi/xx@sha256:0cd3f05c72d6c9b038eb135f91376ee1169ef3a330d34e418e65e2a5c2e9c0d4 AS xx
|
FROM --platform=linux/amd64 docker.io/tonistiigi/xx@sha256:1978e7a58a1777cb0ef0dde76bad60b7914b21da57cfa88047875e4f364297aa AS xx
|
||||||
|
|
||||||
########################## BUILD IMAGE ##########################
|
########################## BUILD IMAGE ##########################
|
||||||
# hadolint ignore=DL3006
|
# hadolint ignore=DL3006
|
||||||
FROM --platform=$BUILDPLATFORM docker.io/library/rust:1.79.0-slim-bookworm as build
|
FROM --platform=$BUILDPLATFORM docker.io/library/rust:1.81.0-slim-bookworm AS build
|
||||||
COPY --from=xx / /
|
COPY --from=xx / /
|
||||||
ARG TARGETARCH
|
ARG TARGETARCH
|
||||||
ARG TARGETVARIANT
|
ARG TARGETVARIANT
|
||||||
@@ -185,7 +186,6 @@ RUN mkdir /data && \
|
|||||||
|
|
||||||
VOLUME /data
|
VOLUME /data
|
||||||
EXPOSE 80
|
EXPOSE 80
|
||||||
EXPOSE 3012
|
|
||||||
|
|
||||||
# Copies the files from the context (Rocket.toml file and web-vault)
|
# Copies the files from the context (Rocket.toml file and web-vault)
|
||||||
# and the binary from the "build" stage to the current stage
|
# and the binary from the "build" stage to the current stage
|
||||||
|
@@ -1,4 +1,5 @@
|
|||||||
# syntax=docker/dockerfile:1
|
# syntax=docker/dockerfile:1
|
||||||
|
# check=skip=FromPlatformFlagConstDisallowed,RedundantTargetPlatform
|
||||||
|
|
||||||
# This file was generated using a Jinja2 template.
|
# This file was generated using a Jinja2 template.
|
||||||
# Please make your changes in `DockerSettings.yaml` or `Dockerfile.j2` and then `make`
|
# Please make your changes in `DockerSettings.yaml` or `Dockerfile.j2` and then `make`
|
||||||
@@ -26,7 +27,7 @@
|
|||||||
# $ docker image inspect --format "{{ '{{' }}.RepoTags}}" docker.io/vaultwarden/web-vault@{{ vault_image_digest }}
|
# $ docker image inspect --format "{{ '{{' }}.RepoTags}}" docker.io/vaultwarden/web-vault@{{ vault_image_digest }}
|
||||||
# [docker.io/vaultwarden/web-vault:{{ vault_version }}]
|
# [docker.io/vaultwarden/web-vault:{{ vault_version }}]
|
||||||
#
|
#
|
||||||
FROM --platform=linux/amd64 docker.io/vaultwarden/web-vault@{{ vault_image_digest }} as vault
|
FROM --platform=linux/amd64 docker.io/vaultwarden/web-vault@{{ vault_image_digest }} AS vault
|
||||||
|
|
||||||
{% if base == "debian" %}
|
{% if base == "debian" %}
|
||||||
########################## Cross Compile Docker Helper Scripts ##########################
|
########################## Cross Compile Docker Helper Scripts ##########################
|
||||||
@@ -38,13 +39,13 @@ FROM --platform=linux/amd64 docker.io/tonistiigi/xx@{{ xx_image_digest }} AS xx
|
|||||||
## NOTE: The Alpine Base Images do not support other platforms then linux/amd64
|
## NOTE: The Alpine Base Images do not support other platforms then linux/amd64
|
||||||
## And for Alpine we define all build images here, they will only be loaded when actually used
|
## And for Alpine we define all build images here, they will only be loaded when actually used
|
||||||
{% for arch in build_stage_image[base].arch_image %}
|
{% for arch in build_stage_image[base].arch_image %}
|
||||||
FROM --platform={{ build_stage_image[base].platform }} {{ build_stage_image[base].arch_image[arch] }} as build_{{ arch }}
|
FROM --platform={{ build_stage_image[base].platform }} {{ build_stage_image[base].arch_image[arch] }} AS build_{{ arch }}
|
||||||
{% endfor %}
|
{% endfor %}
|
||||||
{% endif %}
|
{% endif %}
|
||||||
|
|
||||||
########################## BUILD IMAGE ##########################
|
########################## BUILD IMAGE ##########################
|
||||||
# hadolint ignore=DL3006
|
# hadolint ignore=DL3006
|
||||||
FROM --platform={{ build_stage_image[base].platform }} {{ build_stage_image[base].image }} as build
|
FROM --platform={{ build_stage_image[base].platform }} {{ build_stage_image[base].image }} AS build
|
||||||
{% if base == "debian" %}
|
{% if base == "debian" %}
|
||||||
COPY --from=xx / /
|
COPY --from=xx / /
|
||||||
{% endif %}
|
{% endif %}
|
||||||
@@ -229,7 +230,6 @@ RUN mkdir /data && \
|
|||||||
|
|
||||||
VOLUME /data
|
VOLUME /data
|
||||||
EXPOSE 80
|
EXPOSE 80
|
||||||
EXPOSE 3012
|
|
||||||
|
|
||||||
# Copies the files from the context (Rocket.toml file and web-vault)
|
# Copies the files from the context (Rocket.toml file and web-vault)
|
||||||
# and the binary from the "build" stage to the current stage
|
# and the binary from the "build" stage to the current stage
|
||||||
|
@@ -1,5 +1,9 @@
|
|||||||
#!/bin/sh
|
#!/bin/sh
|
||||||
|
|
||||||
|
if [ -n "${UMASK}" ]; then
|
||||||
|
umask "${UMASK}"
|
||||||
|
fi
|
||||||
|
|
||||||
if [ -r /etc/vaultwarden.sh ]; then
|
if [ -r /etc/vaultwarden.sh ]; then
|
||||||
. /etc/vaultwarden.sh
|
. /etc/vaultwarden.sh
|
||||||
elif [ -r /etc/bitwarden_rs.sh ]; then
|
elif [ -r /etc/bitwarden_rs.sh ]; then
|
||||||
|
@@ -0,0 +1 @@
|
|||||||
|
DROP TABLE twofactor_duo_ctx;
|
@@ -0,0 +1,8 @@
|
|||||||
|
CREATE TABLE twofactor_duo_ctx (
|
||||||
|
state VARCHAR(64) NOT NULL,
|
||||||
|
user_email VARCHAR(255) NOT NULL,
|
||||||
|
nonce VARCHAR(64) NOT NULL,
|
||||||
|
exp BIGINT NOT NULL,
|
||||||
|
|
||||||
|
PRIMARY KEY (state)
|
||||||
|
);
|
@@ -0,0 +1 @@
|
|||||||
|
ALTER TABLE `twofactor_incomplete` DROP COLUMN `device_type`;
|
@@ -0,0 +1 @@
|
|||||||
|
ALTER TABLE twofactor_incomplete ADD COLUMN device_type INTEGER NOT NULL DEFAULT 14; -- 14 = Unknown Browser
|
@@ -0,0 +1 @@
|
|||||||
|
DROP TABLE twofactor_duo_ctx;
|
@@ -0,0 +1,8 @@
|
|||||||
|
CREATE TABLE twofactor_duo_ctx (
|
||||||
|
state VARCHAR(64) NOT NULL,
|
||||||
|
user_email VARCHAR(255) NOT NULL,
|
||||||
|
nonce VARCHAR(64) NOT NULL,
|
||||||
|
exp BIGINT NOT NULL,
|
||||||
|
|
||||||
|
PRIMARY KEY (state)
|
||||||
|
);
|
@@ -0,0 +1 @@
|
|||||||
|
ALTER TABLE twofactor_incomplete DROP COLUMN device_type;
|
@@ -0,0 +1 @@
|
|||||||
|
ALTER TABLE twofactor_incomplete ADD COLUMN device_type INTEGER NOT NULL DEFAULT 14; -- 14 = Unknown Browser
|
@@ -0,0 +1 @@
|
|||||||
|
DROP TABLE twofactor_duo_ctx;
|
@@ -0,0 +1,8 @@
|
|||||||
|
CREATE TABLE twofactor_duo_ctx (
|
||||||
|
state TEXT NOT NULL,
|
||||||
|
user_email TEXT NOT NULL,
|
||||||
|
nonce TEXT NOT NULL,
|
||||||
|
exp INTEGER NOT NULL,
|
||||||
|
|
||||||
|
PRIMARY KEY (state)
|
||||||
|
);
|
@@ -0,0 +1 @@
|
|||||||
|
ALTER TABLE `twofactor_incomplete` DROP COLUMN `device_type`;
|
@@ -0,0 +1 @@
|
|||||||
|
ALTER TABLE twofactor_incomplete ADD COLUMN device_type INTEGER NOT NULL DEFAULT 14; -- 14 = Unknown Browser
|
@@ -1,4 +1,4 @@
|
|||||||
[toolchain]
|
[toolchain]
|
||||||
channel = "1.79.0"
|
channel = "1.81.0"
|
||||||
components = [ "rustfmt", "clippy" ]
|
components = [ "rustfmt", "clippy" ]
|
||||||
profile = "minimal"
|
profile = "minimal"
|
||||||
|
@@ -1,4 +1,5 @@
|
|||||||
use once_cell::sync::Lazy;
|
use once_cell::sync::Lazy;
|
||||||
|
use reqwest::Method;
|
||||||
use serde::de::DeserializeOwned;
|
use serde::de::DeserializeOwned;
|
||||||
use serde_json::Value;
|
use serde_json::Value;
|
||||||
use std::env;
|
use std::env;
|
||||||
@@ -17,13 +18,14 @@ use crate::{
|
|||||||
core::{log_event, two_factor},
|
core::{log_event, two_factor},
|
||||||
unregister_push_device, ApiResult, EmptyResult, JsonResult, Notify,
|
unregister_push_device, ApiResult, EmptyResult, JsonResult, Notify,
|
||||||
},
|
},
|
||||||
auth::{decode_admin, encode_jwt, generate_admin_claims, ClientIp},
|
auth::{decode_admin, encode_jwt, generate_admin_claims, ClientIp, Secure},
|
||||||
config::ConfigBuilder,
|
config::ConfigBuilder,
|
||||||
db::{backup_database, get_sql_server_version, models::*, DbConn, DbConnType},
|
db::{backup_database, get_sql_server_version, models::*, DbConn, DbConnType},
|
||||||
error::{Error, MapResult},
|
error::{Error, MapResult},
|
||||||
|
http_client::make_http_request,
|
||||||
mail,
|
mail,
|
||||||
util::{
|
util::{
|
||||||
container_base_image, format_naive_datetime_local, get_display_size, get_reqwest_client,
|
container_base_image, format_naive_datetime_local, get_display_size, get_web_vault_version,
|
||||||
is_running_in_container, NumberOrString,
|
is_running_in_container, NumberOrString,
|
||||||
},
|
},
|
||||||
CONFIG, VERSION,
|
CONFIG, VERSION,
|
||||||
@@ -168,7 +170,12 @@ struct LoginForm {
|
|||||||
}
|
}
|
||||||
|
|
||||||
#[post("/", data = "<data>")]
|
#[post("/", data = "<data>")]
|
||||||
fn post_admin_login(data: Form<LoginForm>, cookies: &CookieJar<'_>, ip: ClientIp) -> Result<Redirect, AdminResponse> {
|
fn post_admin_login(
|
||||||
|
data: Form<LoginForm>,
|
||||||
|
cookies: &CookieJar<'_>,
|
||||||
|
ip: ClientIp,
|
||||||
|
secure: Secure,
|
||||||
|
) -> Result<Redirect, AdminResponse> {
|
||||||
let data = data.into_inner();
|
let data = data.into_inner();
|
||||||
let redirect = data.redirect;
|
let redirect = data.redirect;
|
||||||
|
|
||||||
@@ -190,9 +197,10 @@ fn post_admin_login(data: Form<LoginForm>, cookies: &CookieJar<'_>, ip: ClientIp
|
|||||||
|
|
||||||
let cookie = Cookie::build((COOKIE_NAME, jwt))
|
let cookie = Cookie::build((COOKIE_NAME, jwt))
|
||||||
.path(admin_path())
|
.path(admin_path())
|
||||||
.max_age(rocket::time::Duration::minutes(CONFIG.admin_session_lifetime()))
|
.max_age(time::Duration::minutes(CONFIG.admin_session_lifetime()))
|
||||||
.same_site(SameSite::Strict)
|
.same_site(SameSite::Strict)
|
||||||
.http_only(true);
|
.http_only(true)
|
||||||
|
.secure(secure.https);
|
||||||
|
|
||||||
cookies.add(cookie);
|
cookies.add(cookie);
|
||||||
if let Some(redirect) = redirect {
|
if let Some(redirect) = redirect {
|
||||||
@@ -290,7 +298,7 @@ async fn invite_user(data: Json<InviteData>, _token: AdminToken, mut conn: DbCon
|
|||||||
|
|
||||||
async fn _generate_invite(user: &User, conn: &mut DbConn) -> EmptyResult {
|
async fn _generate_invite(user: &User, conn: &mut DbConn) -> EmptyResult {
|
||||||
if CONFIG.mail_enabled() {
|
if CONFIG.mail_enabled() {
|
||||||
mail::send_invite(&user.email, &user.uuid, None, None, &CONFIG.invitation_org_name(), None).await
|
mail::send_invite(user, None, None, &CONFIG.invitation_org_name(), None).await
|
||||||
} else {
|
} else {
|
||||||
let invitation = Invitation::new(&user.email);
|
let invitation = Invitation::new(&user.email);
|
||||||
invitation.save(conn).await
|
invitation.save(conn).await
|
||||||
@@ -466,7 +474,7 @@ async fn resend_user_invite(uuid: &str, _token: AdminToken, mut conn: DbConn) ->
|
|||||||
}
|
}
|
||||||
|
|
||||||
if CONFIG.mail_enabled() {
|
if CONFIG.mail_enabled() {
|
||||||
mail::send_invite(&user.email, &user.uuid, None, None, &CONFIG.invitation_org_name(), None).await
|
mail::send_invite(&user, None, None, &CONFIG.invitation_org_name(), None).await
|
||||||
} else {
|
} else {
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
@@ -568,11 +576,6 @@ async fn delete_organization(uuid: &str, _token: AdminToken, mut conn: DbConn) -
|
|||||||
org.delete(&mut conn).await
|
org.delete(&mut conn).await
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Deserialize)]
|
|
||||||
struct WebVaultVersion {
|
|
||||||
version: String,
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Deserialize)]
|
#[derive(Deserialize)]
|
||||||
struct GitRelease {
|
struct GitRelease {
|
||||||
tag_name: String,
|
tag_name: String,
|
||||||
@@ -594,15 +597,15 @@ struct TimeApi {
|
|||||||
}
|
}
|
||||||
|
|
||||||
async fn get_json_api<T: DeserializeOwned>(url: &str) -> Result<T, Error> {
|
async fn get_json_api<T: DeserializeOwned>(url: &str) -> Result<T, Error> {
|
||||||
let json_api = get_reqwest_client();
|
Ok(make_http_request(Method::GET, url)?.send().await?.error_for_status()?.json::<T>().await?)
|
||||||
|
|
||||||
Ok(json_api.get(url).send().await?.error_for_status()?.json::<T>().await?)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn has_http_access() -> bool {
|
async fn has_http_access() -> bool {
|
||||||
let http_access = get_reqwest_client();
|
let req = match make_http_request(Method::HEAD, "https://github.com/dani-garcia/vaultwarden") {
|
||||||
|
Ok(r) => r,
|
||||||
match http_access.head("https://github.com/dani-garcia/vaultwarden").send().await {
|
Err(_) => return false,
|
||||||
|
};
|
||||||
|
match req.send().await {
|
||||||
Ok(r) => r.status().is_success(),
|
Ok(r) => r.status().is_success(),
|
||||||
_ => false,
|
_ => false,
|
||||||
}
|
}
|
||||||
@@ -672,18 +675,6 @@ async fn diagnostics(_token: AdminToken, ip_header: IpHeader, mut conn: DbConn)
|
|||||||
use chrono::prelude::*;
|
use chrono::prelude::*;
|
||||||
use std::net::ToSocketAddrs;
|
use std::net::ToSocketAddrs;
|
||||||
|
|
||||||
// Get current running versions
|
|
||||||
let web_vault_version: WebVaultVersion =
|
|
||||||
match std::fs::read_to_string(format!("{}/{}", CONFIG.web_vault_folder(), "vw-version.json")) {
|
|
||||||
Ok(s) => serde_json::from_str(&s)?,
|
|
||||||
_ => match std::fs::read_to_string(format!("{}/{}", CONFIG.web_vault_folder(), "version.json")) {
|
|
||||||
Ok(s) => serde_json::from_str(&s)?,
|
|
||||||
_ => WebVaultVersion {
|
|
||||||
version: String::from("Version file missing"),
|
|
||||||
},
|
|
||||||
},
|
|
||||||
};
|
|
||||||
|
|
||||||
// Execute some environment checks
|
// Execute some environment checks
|
||||||
let running_within_container = is_running_in_container();
|
let running_within_container = is_running_in_container();
|
||||||
let has_http_access = has_http_access().await;
|
let has_http_access = has_http_access().await;
|
||||||
@@ -703,13 +694,16 @@ async fn diagnostics(_token: AdminToken, ip_header: IpHeader, mut conn: DbConn)
|
|||||||
|
|
||||||
let ip_header_name = &ip_header.0.unwrap_or_default();
|
let ip_header_name = &ip_header.0.unwrap_or_default();
|
||||||
|
|
||||||
|
// Get current running versions
|
||||||
|
let web_vault_version = get_web_vault_version();
|
||||||
|
|
||||||
let diagnostics_json = json!({
|
let diagnostics_json = json!({
|
||||||
"dns_resolved": dns_resolved,
|
"dns_resolved": dns_resolved,
|
||||||
"current_release": VERSION,
|
"current_release": VERSION,
|
||||||
"latest_release": latest_release,
|
"latest_release": latest_release,
|
||||||
"latest_commit": latest_commit,
|
"latest_commit": latest_commit,
|
||||||
"web_vault_enabled": &CONFIG.web_vault_enabled(),
|
"web_vault_enabled": &CONFIG.web_vault_enabled(),
|
||||||
"web_vault_version": web_vault_version.version.trim_start_matches('v'),
|
"web_vault_version": web_vault_version,
|
||||||
"latest_web_build": latest_web_build,
|
"latest_web_build": latest_web_build,
|
||||||
"running_within_container": running_within_container,
|
"running_within_container": running_within_container,
|
||||||
"container_base_image": if running_within_container { container_base_image() } else { "Not applicable" },
|
"container_base_image": if running_within_container { container_base_image() } else { "Not applicable" },
|
||||||
@@ -723,8 +717,8 @@ async fn diagnostics(_token: AdminToken, ip_header: IpHeader, mut conn: DbConn)
|
|||||||
"db_version": get_sql_server_version(&mut conn).await,
|
"db_version": get_sql_server_version(&mut conn).await,
|
||||||
"admin_url": format!("{}/diagnostics", admin_url()),
|
"admin_url": format!("{}/diagnostics", admin_url()),
|
||||||
"overrides": &CONFIG.get_overrides().join(", "),
|
"overrides": &CONFIG.get_overrides().join(", "),
|
||||||
"host_arch": std::env::consts::ARCH,
|
"host_arch": env::consts::ARCH,
|
||||||
"host_os": std::env::consts::OS,
|
"host_os": env::consts::OS,
|
||||||
"server_time_local": Local::now().format("%Y-%m-%d %H:%M:%S %Z").to_string(),
|
"server_time_local": Local::now().format("%Y-%m-%d %H:%M:%S %Z").to_string(),
|
||||||
"server_time": Utc::now().format("%Y-%m-%d %H:%M:%S UTC").to_string(), // Run the server date/time check as late as possible to minimize the time difference
|
"server_time": Utc::now().format("%Y-%m-%d %H:%M:%S UTC").to_string(), // Run the server date/time check as late as possible to minimize the time difference
|
||||||
"ntp_time": get_ntp_time(has_http_access).await, // Run the ntp check as late as possible to minimize the time difference
|
"ntp_time": get_ntp_time(has_http_access).await, // Run the ntp check as late as possible to minimize the time difference
|
||||||
@@ -743,18 +737,27 @@ fn get_diagnostics_config(_token: AdminToken) -> Json<Value> {
|
|||||||
#[post("/config", data = "<data>")]
|
#[post("/config", data = "<data>")]
|
||||||
fn post_config(data: Json<ConfigBuilder>, _token: AdminToken) -> EmptyResult {
|
fn post_config(data: Json<ConfigBuilder>, _token: AdminToken) -> EmptyResult {
|
||||||
let data: ConfigBuilder = data.into_inner();
|
let data: ConfigBuilder = data.into_inner();
|
||||||
CONFIG.update_config(data)
|
if let Err(e) = CONFIG.update_config(data) {
|
||||||
|
err!(format!("Unable to save config: {e:?}"))
|
||||||
|
}
|
||||||
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
#[post("/config/delete")]
|
#[post("/config/delete")]
|
||||||
fn delete_config(_token: AdminToken) -> EmptyResult {
|
fn delete_config(_token: AdminToken) -> EmptyResult {
|
||||||
CONFIG.delete_user_config()
|
if let Err(e) = CONFIG.delete_user_config() {
|
||||||
|
err!(format!("Unable to delete config: {e:?}"))
|
||||||
|
}
|
||||||
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
#[post("/config/backup_db")]
|
#[post("/config/backup_db")]
|
||||||
async fn backup_db(_token: AdminToken, mut conn: DbConn) -> EmptyResult {
|
async fn backup_db(_token: AdminToken, mut conn: DbConn) -> ApiResult<String> {
|
||||||
if *CAN_BACKUP {
|
if *CAN_BACKUP {
|
||||||
backup_database(&mut conn).await
|
match backup_database(&mut conn).await {
|
||||||
|
Ok(f) => Ok(format!("Backup to '{f}' was successful")),
|
||||||
|
Err(e) => err!(format!("Backup was unsuccessful {e}")),
|
||||||
|
}
|
||||||
} else {
|
} else {
|
||||||
err!("Can't back up current DB (Only SQLite supports this feature)");
|
err!("Can't back up current DB (Only SQLite supports this feature)");
|
||||||
}
|
}
|
||||||
|
@@ -1,5 +1,5 @@
|
|||||||
use crate::db::DbPool;
|
use crate::db::DbPool;
|
||||||
use chrono::Utc;
|
use chrono::{SecondsFormat, Utc};
|
||||||
use rocket::serde::json::Json;
|
use rocket::serde::json::Json;
|
||||||
use serde_json::Value;
|
use serde_json::Value;
|
||||||
|
|
||||||
@@ -112,7 +112,7 @@ async fn is_email_2fa_required(org_user_uuid: Option<String>, conn: &mut DbConn)
|
|||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
if org_user_uuid.is_some() {
|
if org_user_uuid.is_some() {
|
||||||
return OrgPolicy::is_enabled_by_org(&org_user_uuid.unwrap(), OrgPolicyType::TwoFactorAuthentication, conn)
|
return OrgPolicy::is_enabled_for_member(&org_user_uuid.unwrap(), OrgPolicyType::TwoFactorAuthentication, conn)
|
||||||
.await;
|
.await;
|
||||||
}
|
}
|
||||||
false
|
false
|
||||||
@@ -223,7 +223,7 @@ pub async fn _register(data: Json<RegisterData>, mut conn: DbConn) -> JsonResult
|
|||||||
}
|
}
|
||||||
|
|
||||||
if verified_by_invite && is_email_2fa_required(data.organization_user_id, &mut conn).await {
|
if verified_by_invite && is_email_2fa_required(data.organization_user_id, &mut conn).await {
|
||||||
let _ = email::activate_email_2fa(&user, &mut conn).await;
|
email::activate_email_2fa(&user, &mut conn).await.ok();
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -232,7 +232,7 @@ pub async fn _register(data: Json<RegisterData>, mut conn: DbConn) -> JsonResult
|
|||||||
// accept any open emergency access invitations
|
// accept any open emergency access invitations
|
||||||
if !CONFIG.mail_enabled() && CONFIG.emergency_access_allowed() {
|
if !CONFIG.mail_enabled() && CONFIG.emergency_access_allowed() {
|
||||||
for mut emergency_invite in EmergencyAccess::find_all_invited_by_grantee_email(&user.email, &mut conn).await {
|
for mut emergency_invite in EmergencyAccess::find_all_invited_by_grantee_email(&user.email, &mut conn).await {
|
||||||
let _ = emergency_invite.accept_invite(&user.uuid, &user.email, &mut conn).await;
|
emergency_invite.accept_invite(&user.uuid, &user.email, &mut conn).await.ok();
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -490,7 +490,7 @@ async fn post_rotatekey(data: Json<KeyData>, headers: Headers, mut conn: DbConn,
|
|||||||
// Bitwarden does not process the import if there is one item invalid.
|
// Bitwarden does not process the import if there is one item invalid.
|
||||||
// Since we check for the size of the encrypted note length, we need to do that here to pre-validate it.
|
// Since we check for the size of the encrypted note length, we need to do that here to pre-validate it.
|
||||||
// TODO: See if we can optimize the whole cipher adding/importing and prevent duplicate code and checks.
|
// TODO: See if we can optimize the whole cipher adding/importing and prevent duplicate code and checks.
|
||||||
Cipher::validate_notes(&data.ciphers)?;
|
Cipher::validate_cipher_data(&data.ciphers)?;
|
||||||
|
|
||||||
let user_uuid = &headers.user.uuid;
|
let user_uuid = &headers.user.uuid;
|
||||||
|
|
||||||
@@ -1038,7 +1038,7 @@ async fn put_device_token(uuid: &str, data: Json<PushToken>, headers: Headers, m
|
|||||||
return Ok(());
|
return Ok(());
|
||||||
} else {
|
} else {
|
||||||
// Try to unregister already registered device
|
// Try to unregister already registered device
|
||||||
let _ = unregister_push_device(device.push_uuid).await;
|
unregister_push_device(device.push_uuid).await.ok();
|
||||||
}
|
}
|
||||||
// clear the push_uuid
|
// clear the push_uuid
|
||||||
device.push_uuid = None;
|
device.push_uuid = None;
|
||||||
@@ -1123,7 +1123,7 @@ async fn post_auth_request(
|
|||||||
"requestIpAddress": auth_request.request_ip,
|
"requestIpAddress": auth_request.request_ip,
|
||||||
"key": null,
|
"key": null,
|
||||||
"masterPasswordHash": null,
|
"masterPasswordHash": null,
|
||||||
"creationDate": auth_request.creation_date.and_utc(),
|
"creationDate": auth_request.creation_date.and_utc().to_rfc3339_opts(SecondsFormat::Micros, true),
|
||||||
"responseDate": null,
|
"responseDate": null,
|
||||||
"requestApproved": false,
|
"requestApproved": false,
|
||||||
"origin": CONFIG.domain_origin(),
|
"origin": CONFIG.domain_origin(),
|
||||||
@@ -1140,7 +1140,9 @@ async fn get_auth_request(uuid: &str, mut conn: DbConn) -> JsonResult {
|
|||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
let response_date_utc = auth_request.response_date.map(|response_date| response_date.and_utc());
|
let response_date_utc = auth_request
|
||||||
|
.response_date
|
||||||
|
.map(|response_date| response_date.and_utc().to_rfc3339_opts(SecondsFormat::Micros, true));
|
||||||
|
|
||||||
Ok(Json(json!(
|
Ok(Json(json!(
|
||||||
{
|
{
|
||||||
@@ -1150,7 +1152,7 @@ async fn get_auth_request(uuid: &str, mut conn: DbConn) -> JsonResult {
|
|||||||
"requestIpAddress": auth_request.request_ip,
|
"requestIpAddress": auth_request.request_ip,
|
||||||
"key": auth_request.enc_key,
|
"key": auth_request.enc_key,
|
||||||
"masterPasswordHash": auth_request.master_password_hash,
|
"masterPasswordHash": auth_request.master_password_hash,
|
||||||
"creationDate": auth_request.creation_date.and_utc(),
|
"creationDate": auth_request.creation_date.and_utc().to_rfc3339_opts(SecondsFormat::Micros, true),
|
||||||
"responseDate": response_date_utc,
|
"responseDate": response_date_utc,
|
||||||
"requestApproved": auth_request.approved,
|
"requestApproved": auth_request.approved,
|
||||||
"origin": CONFIG.domain_origin(),
|
"origin": CONFIG.domain_origin(),
|
||||||
@@ -1195,7 +1197,9 @@ async fn put_auth_request(
|
|||||||
nt.send_auth_response(&auth_request.user_uuid, &auth_request.uuid, data.device_identifier, &mut conn).await;
|
nt.send_auth_response(&auth_request.user_uuid, &auth_request.uuid, data.device_identifier, &mut conn).await;
|
||||||
}
|
}
|
||||||
|
|
||||||
let response_date_utc = auth_request.response_date.map(|response_date| response_date.and_utc());
|
let response_date_utc = auth_request
|
||||||
|
.response_date
|
||||||
|
.map(|response_date| response_date.and_utc().to_rfc3339_opts(SecondsFormat::Micros, true));
|
||||||
|
|
||||||
Ok(Json(json!(
|
Ok(Json(json!(
|
||||||
{
|
{
|
||||||
@@ -1205,7 +1209,7 @@ async fn put_auth_request(
|
|||||||
"requestIpAddress": auth_request.request_ip,
|
"requestIpAddress": auth_request.request_ip,
|
||||||
"key": auth_request.enc_key,
|
"key": auth_request.enc_key,
|
||||||
"masterPasswordHash": auth_request.master_password_hash,
|
"masterPasswordHash": auth_request.master_password_hash,
|
||||||
"creationDate": auth_request.creation_date.and_utc(),
|
"creationDate": auth_request.creation_date.and_utc().to_rfc3339_opts(SecondsFormat::Micros, true),
|
||||||
"responseDate": response_date_utc,
|
"responseDate": response_date_utc,
|
||||||
"requestApproved": auth_request.approved,
|
"requestApproved": auth_request.approved,
|
||||||
"origin": CONFIG.domain_origin(),
|
"origin": CONFIG.domain_origin(),
|
||||||
@@ -1227,7 +1231,9 @@ async fn get_auth_request_response(uuid: &str, code: &str, mut conn: DbConn) ->
|
|||||||
err!("Access code invalid doesn't exist")
|
err!("Access code invalid doesn't exist")
|
||||||
}
|
}
|
||||||
|
|
||||||
let response_date_utc = auth_request.response_date.map(|response_date| response_date.and_utc());
|
let response_date_utc = auth_request
|
||||||
|
.response_date
|
||||||
|
.map(|response_date| response_date.and_utc().to_rfc3339_opts(SecondsFormat::Micros, true));
|
||||||
|
|
||||||
Ok(Json(json!(
|
Ok(Json(json!(
|
||||||
{
|
{
|
||||||
@@ -1237,7 +1243,7 @@ async fn get_auth_request_response(uuid: &str, code: &str, mut conn: DbConn) ->
|
|||||||
"requestIpAddress": auth_request.request_ip,
|
"requestIpAddress": auth_request.request_ip,
|
||||||
"key": auth_request.enc_key,
|
"key": auth_request.enc_key,
|
||||||
"masterPasswordHash": auth_request.master_password_hash,
|
"masterPasswordHash": auth_request.master_password_hash,
|
||||||
"creationDate": auth_request.creation_date.and_utc(),
|
"creationDate": auth_request.creation_date.and_utc().to_rfc3339_opts(SecondsFormat::Micros, true),
|
||||||
"responseDate": response_date_utc,
|
"responseDate": response_date_utc,
|
||||||
"requestApproved": auth_request.approved,
|
"requestApproved": auth_request.approved,
|
||||||
"origin": CONFIG.domain_origin(),
|
"origin": CONFIG.domain_origin(),
|
||||||
@@ -1255,7 +1261,7 @@ async fn get_auth_requests(headers: Headers, mut conn: DbConn) -> JsonResult {
|
|||||||
.iter()
|
.iter()
|
||||||
.filter(|request| request.approved.is_none())
|
.filter(|request| request.approved.is_none())
|
||||||
.map(|request| {
|
.map(|request| {
|
||||||
let response_date_utc = request.response_date.map(|response_date| response_date.and_utc());
|
let response_date_utc = request.response_date.map(|response_date| response_date.and_utc().to_rfc3339_opts(SecondsFormat::Micros, true));
|
||||||
|
|
||||||
json!({
|
json!({
|
||||||
"id": request.uuid,
|
"id": request.uuid,
|
||||||
@@ -1264,7 +1270,7 @@ async fn get_auth_requests(headers: Headers, mut conn: DbConn) -> JsonResult {
|
|||||||
"requestIpAddress": request.request_ip,
|
"requestIpAddress": request.request_ip,
|
||||||
"key": request.enc_key,
|
"key": request.enc_key,
|
||||||
"masterPasswordHash": request.master_password_hash,
|
"masterPasswordHash": request.master_password_hash,
|
||||||
"creationDate": request.creation_date.and_utc(),
|
"creationDate": request.creation_date.and_utc().to_rfc3339_opts(SecondsFormat::Micros, true),
|
||||||
"responseDate": response_date_utc,
|
"responseDate": response_date_utc,
|
||||||
"requestApproved": request.approved,
|
"requestApproved": request.approved,
|
||||||
"origin": CONFIG.domain_origin(),
|
"origin": CONFIG.domain_origin(),
|
||||||
|
@@ -208,6 +208,7 @@ pub struct CipherData {
|
|||||||
// Folder id is not included in import
|
// Folder id is not included in import
|
||||||
folder_id: Option<String>,
|
folder_id: Option<String>,
|
||||||
// TODO: Some of these might appear all the time, no need for Option
|
// TODO: Some of these might appear all the time, no need for Option
|
||||||
|
#[serde(alias = "organizationID")]
|
||||||
pub organization_id: Option<String>,
|
pub organization_id: Option<String>,
|
||||||
|
|
||||||
key: Option<String>,
|
key: Option<String>,
|
||||||
@@ -232,7 +233,7 @@ pub struct CipherData {
|
|||||||
favorite: Option<bool>,
|
favorite: Option<bool>,
|
||||||
reprompt: Option<i32>,
|
reprompt: Option<i32>,
|
||||||
|
|
||||||
password_history: Option<Value>,
|
pub password_history: Option<Value>,
|
||||||
|
|
||||||
// These are used during key rotation
|
// These are used during key rotation
|
||||||
// 'Attachments' is unused, contains map of {id: filename}
|
// 'Attachments' is unused, contains map of {id: filename}
|
||||||
@@ -377,8 +378,9 @@ pub async fn update_cipher_from_data(
|
|||||||
}
|
}
|
||||||
|
|
||||||
if let Some(note) = &data.notes {
|
if let Some(note) = &data.notes {
|
||||||
if note.len() > 10_000 {
|
let max_note_size = CONFIG._max_note_size();
|
||||||
err!("The field Notes exceeds the maximum encrypted value length of 10000 characters.")
|
if note.len() > max_note_size {
|
||||||
|
err!(format!("The field Notes exceeds the maximum encrypted value length of {max_note_size} characters."))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -561,7 +563,7 @@ async fn post_ciphers_import(
|
|||||||
// Bitwarden does not process the import if there is one item invalid.
|
// Bitwarden does not process the import if there is one item invalid.
|
||||||
// Since we check for the size of the encrypted note length, we need to do that here to pre-validate it.
|
// Since we check for the size of the encrypted note length, we need to do that here to pre-validate it.
|
||||||
// TODO: See if we can optimize the whole cipher adding/importing and prevent duplicate code and checks.
|
// TODO: See if we can optimize the whole cipher adding/importing and prevent duplicate code and checks.
|
||||||
Cipher::validate_notes(&data.ciphers)?;
|
Cipher::validate_cipher_data(&data.ciphers)?;
|
||||||
|
|
||||||
// Read and create the folders
|
// Read and create the folders
|
||||||
let existing_folders: Vec<String> =
|
let existing_folders: Vec<String> =
|
||||||
@@ -701,6 +703,7 @@ async fn put_cipher_partial(
|
|||||||
#[derive(Deserialize)]
|
#[derive(Deserialize)]
|
||||||
#[serde(rename_all = "camelCase")]
|
#[serde(rename_all = "camelCase")]
|
||||||
struct CollectionsAdminData {
|
struct CollectionsAdminData {
|
||||||
|
#[serde(alias = "CollectionIds")]
|
||||||
collection_ids: Vec<String>,
|
collection_ids: Vec<String>,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@@ -12,6 +12,7 @@ pub use accounts::purge_auth_requests;
|
|||||||
pub use ciphers::{purge_trashed_ciphers, CipherData, CipherSyncData, CipherSyncType};
|
pub use ciphers::{purge_trashed_ciphers, CipherData, CipherSyncData, CipherSyncType};
|
||||||
pub use emergency_access::{emergency_notification_reminder_job, emergency_request_timeout_job};
|
pub use emergency_access::{emergency_notification_reminder_job, emergency_request_timeout_job};
|
||||||
pub use events::{event_cleanup_job, log_event, log_user_event};
|
pub use events::{event_cleanup_job, log_event, log_user_event};
|
||||||
|
use reqwest::Method;
|
||||||
pub use sends::purge_sends;
|
pub use sends::purge_sends;
|
||||||
|
|
||||||
pub fn routes() -> Vec<Route> {
|
pub fn routes() -> Vec<Route> {
|
||||||
@@ -53,7 +54,8 @@ use crate::{
|
|||||||
auth::Headers,
|
auth::Headers,
|
||||||
db::DbConn,
|
db::DbConn,
|
||||||
error::Error,
|
error::Error,
|
||||||
util::{get_reqwest_client, parse_experimental_client_feature_flags},
|
http_client::make_http_request,
|
||||||
|
util::parse_experimental_client_feature_flags,
|
||||||
};
|
};
|
||||||
|
|
||||||
#[derive(Debug, Serialize, Deserialize)]
|
#[derive(Debug, Serialize, Deserialize)]
|
||||||
@@ -139,9 +141,7 @@ async fn hibp_breach(username: &str) -> JsonResult {
|
|||||||
);
|
);
|
||||||
|
|
||||||
if let Some(api_key) = crate::CONFIG.hibp_api_key() {
|
if let Some(api_key) = crate::CONFIG.hibp_api_key() {
|
||||||
let hibp_client = get_reqwest_client();
|
let res = make_http_request(Method::GET, &url)?.header("hibp-api-key", api_key).send().await?;
|
||||||
|
|
||||||
let res = hibp_client.get(&url).header("hibp-api-key", api_key).send().await?;
|
|
||||||
|
|
||||||
// If we get a 404, return a 404, it means no breached accounts
|
// If we get a 404, return a 404, it means no breached accounts
|
||||||
if res.status() == 404 {
|
if res.status() == 404 {
|
||||||
@@ -190,6 +190,8 @@ fn config() -> Json<Value> {
|
|||||||
parse_experimental_client_feature_flags(&crate::CONFIG.experimental_client_feature_flags());
|
parse_experimental_client_feature_flags(&crate::CONFIG.experimental_client_feature_flags());
|
||||||
// Force the new key rotation feature
|
// Force the new key rotation feature
|
||||||
feature_states.insert("key-rotation-improvements".to_string(), true);
|
feature_states.insert("key-rotation-improvements".to_string(), true);
|
||||||
|
feature_states.insert("flexible-collections-v-1".to_string(), false);
|
||||||
|
|
||||||
Json(json!({
|
Json(json!({
|
||||||
// Note: The clients use this version to handle backwards compatibility concerns
|
// Note: The clients use this version to handle backwards compatibility concerns
|
||||||
// This means they expect a version that closely matches the Bitwarden server version
|
// This means they expect a version that closely matches the Bitwarden server version
|
||||||
@@ -200,8 +202,10 @@ fn config() -> Json<Value> {
|
|||||||
"gitHash": option_env!("GIT_REV"),
|
"gitHash": option_env!("GIT_REV"),
|
||||||
"server": {
|
"server": {
|
||||||
"name": "Vaultwarden",
|
"name": "Vaultwarden",
|
||||||
"url": "https://github.com/dani-garcia/vaultwarden",
|
"url": "https://github.com/dani-garcia/vaultwarden"
|
||||||
"version": crate::VERSION
|
},
|
||||||
|
"settings": {
|
||||||
|
"disableUserRegistration": !crate::CONFIG.signups_allowed() && crate::CONFIG.signups_domains_whitelist().is_empty(),
|
||||||
},
|
},
|
||||||
"environment": {
|
"environment": {
|
||||||
"vault": domain,
|
"vault": domain,
|
||||||
|
@@ -2,6 +2,7 @@ use num_traits::FromPrimitive;
|
|||||||
use rocket::serde::json::Json;
|
use rocket::serde::json::Json;
|
||||||
use rocket::Route;
|
use rocket::Route;
|
||||||
use serde_json::Value;
|
use serde_json::Value;
|
||||||
|
use std::collections::{HashMap, HashSet};
|
||||||
|
|
||||||
use crate::{
|
use crate::{
|
||||||
api::{
|
api::{
|
||||||
@@ -39,6 +40,7 @@ pub fn routes() -> Vec<Route> {
|
|||||||
delete_organization_collection,
|
delete_organization_collection,
|
||||||
post_organization_collection_delete,
|
post_organization_collection_delete,
|
||||||
bulk_delete_organization_collections,
|
bulk_delete_organization_collections,
|
||||||
|
post_bulk_collections,
|
||||||
get_org_details,
|
get_org_details,
|
||||||
get_org_users,
|
get_org_users,
|
||||||
send_invite,
|
send_invite,
|
||||||
@@ -65,6 +67,7 @@ pub fn routes() -> Vec<Route> {
|
|||||||
import,
|
import,
|
||||||
post_org_keys,
|
post_org_keys,
|
||||||
get_organization_keys,
|
get_organization_keys,
|
||||||
|
get_organization_public_key,
|
||||||
bulk_public_keys,
|
bulk_public_keys,
|
||||||
deactivate_organization_user,
|
deactivate_organization_user,
|
||||||
bulk_deactivate_organization_user,
|
bulk_deactivate_organization_user,
|
||||||
@@ -506,7 +509,7 @@ async fn post_organization_collection_update(
|
|||||||
CollectionUser::save(&org_user.user_uuid, col_id, user.read_only, user.hide_passwords, &mut conn).await?;
|
CollectionUser::save(&org_user.user_uuid, col_id, user.read_only, user.hide_passwords, &mut conn).await?;
|
||||||
}
|
}
|
||||||
|
|
||||||
Ok(Json(collection.to_json()))
|
Ok(Json(collection.to_json_details(&headers.user.uuid, None, &mut conn).await))
|
||||||
}
|
}
|
||||||
|
|
||||||
#[delete("/organizations/<org_id>/collections/<col_id>/user/<org_user_id>")]
|
#[delete("/organizations/<org_id>/collections/<col_id>/user/<org_user_id>")]
|
||||||
@@ -749,12 +752,19 @@ struct OrgIdData {
|
|||||||
}
|
}
|
||||||
|
|
||||||
#[get("/ciphers/organization-details?<data..>")]
|
#[get("/ciphers/organization-details?<data..>")]
|
||||||
async fn get_org_details(data: OrgIdData, headers: Headers, mut conn: DbConn) -> Json<Value> {
|
async fn get_org_details(data: OrgIdData, headers: Headers, mut conn: DbConn) -> JsonResult {
|
||||||
Json(json!({
|
if UserOrganization::find_confirmed_by_user_and_org(&headers.user.uuid, &data.organization_id, &mut conn)
|
||||||
|
.await
|
||||||
|
.is_none()
|
||||||
|
{
|
||||||
|
err_code!("Resource not found.", rocket::http::Status::NotFound.code);
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(Json(json!({
|
||||||
"data": _get_org_details(&data.organization_id, &headers.host, &headers.user.uuid, &mut conn).await,
|
"data": _get_org_details(&data.organization_id, &headers.host, &headers.user.uuid, &mut conn).await,
|
||||||
"object": "list",
|
"object": "list",
|
||||||
"continuationToken": null,
|
"continuationToken": null,
|
||||||
}))
|
})))
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn _get_org_details(org_id: &str, host: &str, user_uuid: &str, conn: &mut DbConn) -> Value {
|
async fn _get_org_details(org_id: &str, host: &str, user_uuid: &str, conn: &mut DbConn) -> Value {
|
||||||
@@ -844,7 +854,8 @@ struct InviteData {
|
|||||||
groups: Vec<String>,
|
groups: Vec<String>,
|
||||||
r#type: NumberOrString,
|
r#type: NumberOrString,
|
||||||
collections: Option<Vec<CollectionData>>,
|
collections: Option<Vec<CollectionData>>,
|
||||||
access_all: Option<bool>,
|
#[serde(default)]
|
||||||
|
access_all: bool,
|
||||||
}
|
}
|
||||||
|
|
||||||
#[post("/organizations/<org_id>/users/invite", data = "<data>")]
|
#[post("/organizations/<org_id>/users/invite", data = "<data>")]
|
||||||
@@ -896,7 +907,7 @@ async fn send_invite(org_id: &str, data: Json<InviteData>, headers: AdminHeaders
|
|||||||
};
|
};
|
||||||
|
|
||||||
let mut new_user = UserOrganization::new(user.uuid.clone(), String::from(org_id));
|
let mut new_user = UserOrganization::new(user.uuid.clone(), String::from(org_id));
|
||||||
let access_all = data.access_all.unwrap_or(false);
|
let access_all = data.access_all;
|
||||||
new_user.access_all = access_all;
|
new_user.access_all = access_all;
|
||||||
new_user.atype = new_type;
|
new_user.atype = new_type;
|
||||||
new_user.status = user_org_status;
|
new_user.status = user_org_status;
|
||||||
@@ -945,8 +956,7 @@ async fn send_invite(org_id: &str, data: Json<InviteData>, headers: AdminHeaders
|
|||||||
};
|
};
|
||||||
|
|
||||||
mail::send_invite(
|
mail::send_invite(
|
||||||
&email,
|
&user,
|
||||||
&user.uuid,
|
|
||||||
Some(String::from(org_id)),
|
Some(String::from(org_id)),
|
||||||
Some(new_user.uuid),
|
Some(new_user.uuid),
|
||||||
&org_name,
|
&org_name,
|
||||||
@@ -997,14 +1007,6 @@ async fn reinvite_user(org_id: &str, user_org: &str, headers: AdminHeaders, mut
|
|||||||
}
|
}
|
||||||
|
|
||||||
async fn _reinvite_user(org_id: &str, user_org: &str, invited_by_email: &str, conn: &mut DbConn) -> EmptyResult {
|
async fn _reinvite_user(org_id: &str, user_org: &str, invited_by_email: &str, conn: &mut DbConn) -> EmptyResult {
|
||||||
if !CONFIG.invitations_allowed() {
|
|
||||||
err!("Invitations are not allowed.")
|
|
||||||
}
|
|
||||||
|
|
||||||
if !CONFIG.mail_enabled() {
|
|
||||||
err!("SMTP is not configured.")
|
|
||||||
}
|
|
||||||
|
|
||||||
let user_org = match UserOrganization::find_by_uuid(user_org, conn).await {
|
let user_org = match UserOrganization::find_by_uuid(user_org, conn).await {
|
||||||
Some(user_org) => user_org,
|
Some(user_org) => user_org,
|
||||||
None => err!("The user hasn't been invited to the organization."),
|
None => err!("The user hasn't been invited to the organization."),
|
||||||
@@ -1019,6 +1021,10 @@ async fn _reinvite_user(org_id: &str, user_org: &str, invited_by_email: &str, co
|
|||||||
None => err!("User not found."),
|
None => err!("User not found."),
|
||||||
};
|
};
|
||||||
|
|
||||||
|
if !CONFIG.invitations_allowed() && user.password_hash.is_empty() {
|
||||||
|
err!("Invitations are not allowed.")
|
||||||
|
}
|
||||||
|
|
||||||
let org_name = match Organization::find_by_uuid(org_id, conn).await {
|
let org_name = match Organization::find_by_uuid(org_id, conn).await {
|
||||||
Some(org) => org.name,
|
Some(org) => org.name,
|
||||||
None => err!("Error looking up organization."),
|
None => err!("Error looking up organization."),
|
||||||
@@ -1026,17 +1032,21 @@ async fn _reinvite_user(org_id: &str, user_org: &str, invited_by_email: &str, co
|
|||||||
|
|
||||||
if CONFIG.mail_enabled() {
|
if CONFIG.mail_enabled() {
|
||||||
mail::send_invite(
|
mail::send_invite(
|
||||||
&user.email,
|
&user,
|
||||||
&user.uuid,
|
|
||||||
Some(org_id.to_string()),
|
Some(org_id.to_string()),
|
||||||
Some(user_org.uuid),
|
Some(user_org.uuid),
|
||||||
&org_name,
|
&org_name,
|
||||||
Some(invited_by_email.to_string()),
|
Some(invited_by_email.to_string()),
|
||||||
)
|
)
|
||||||
.await?;
|
.await?;
|
||||||
} else {
|
} else if user.password_hash.is_empty() {
|
||||||
let invitation = Invitation::new(&user.email);
|
let invitation = Invitation::new(&user.email);
|
||||||
invitation.save(conn).await?;
|
invitation.save(conn).await?;
|
||||||
|
} else {
|
||||||
|
let _ = Invitation::take(&user.email, conn).await;
|
||||||
|
let mut user_org = user_org;
|
||||||
|
user_org.status = UserOrgStatus::Accepted as i32;
|
||||||
|
user_org.save(conn).await?;
|
||||||
}
|
}
|
||||||
|
|
||||||
Ok(())
|
Ok(())
|
||||||
@@ -1296,6 +1306,7 @@ struct EditUserData {
|
|||||||
r#type: NumberOrString,
|
r#type: NumberOrString,
|
||||||
collections: Option<Vec<CollectionData>>,
|
collections: Option<Vec<CollectionData>>,
|
||||||
groups: Option<Vec<String>>,
|
groups: Option<Vec<String>>,
|
||||||
|
#[serde(default)]
|
||||||
access_all: bool,
|
access_all: bool,
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -1585,7 +1596,7 @@ async fn post_org_import(
|
|||||||
// Bitwarden does not process the import if there is one item invalid.
|
// Bitwarden does not process the import if there is one item invalid.
|
||||||
// Since we check for the size of the encrypted note length, we need to do that here to pre-validate it.
|
// Since we check for the size of the encrypted note length, we need to do that here to pre-validate it.
|
||||||
// TODO: See if we can optimize the whole cipher adding/importing and prevent duplicate code and checks.
|
// TODO: See if we can optimize the whole cipher adding/importing and prevent duplicate code and checks.
|
||||||
Cipher::validate_notes(&data.ciphers)?;
|
Cipher::validate_cipher_data(&data.ciphers)?;
|
||||||
|
|
||||||
let mut collections = Vec::new();
|
let mut collections = Vec::new();
|
||||||
for coll in data.collections {
|
for coll in data.collections {
|
||||||
@@ -1628,6 +1639,66 @@ async fn post_org_import(
|
|||||||
user.update_revision(&mut conn).await
|
user.update_revision(&mut conn).await
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[derive(Deserialize)]
|
||||||
|
#[serde(rename_all = "camelCase")]
|
||||||
|
#[allow(dead_code)]
|
||||||
|
struct BulkCollectionsData {
|
||||||
|
organization_id: String,
|
||||||
|
cipher_ids: Vec<String>,
|
||||||
|
collection_ids: HashSet<String>,
|
||||||
|
remove_collections: bool,
|
||||||
|
}
|
||||||
|
|
||||||
|
// This endpoint is only reachable via the organization view, therefor this endpoint is located here
|
||||||
|
// Also Bitwarden does not send out Notifications for these changes, it only does this for individual cipher collection updates
|
||||||
|
#[post("/ciphers/bulk-collections", data = "<data>")]
|
||||||
|
async fn post_bulk_collections(data: Json<BulkCollectionsData>, headers: Headers, mut conn: DbConn) -> EmptyResult {
|
||||||
|
let data: BulkCollectionsData = data.into_inner();
|
||||||
|
|
||||||
|
// This feature does not seem to be active on all the clients
|
||||||
|
// To prevent future issues, add a check to block a call when this is set to true
|
||||||
|
if data.remove_collections {
|
||||||
|
err!("Bulk removing of collections is not yet implemented")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get all the collection available to the user in one query
|
||||||
|
// Also filter based upon the provided collections
|
||||||
|
let user_collections: HashMap<String, Collection> =
|
||||||
|
Collection::find_by_organization_and_user_uuid(&data.organization_id, &headers.user.uuid, &mut conn)
|
||||||
|
.await
|
||||||
|
.into_iter()
|
||||||
|
.filter_map(|c| {
|
||||||
|
if data.collection_ids.contains(&c.uuid) {
|
||||||
|
Some((c.uuid.clone(), c))
|
||||||
|
} else {
|
||||||
|
None
|
||||||
|
}
|
||||||
|
})
|
||||||
|
.collect();
|
||||||
|
|
||||||
|
// Verify if all the collections requested exists and are writeable for the user, else abort
|
||||||
|
for collection_uuid in &data.collection_ids {
|
||||||
|
match user_collections.get(collection_uuid) {
|
||||||
|
Some(collection) if collection.is_writable_by_user(&headers.user.uuid, &mut conn).await => (),
|
||||||
|
_ => err_code!("Resource not found", "User does not have access to a collection", 404),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for cipher_id in data.cipher_ids.iter() {
|
||||||
|
// Only act on existing cipher uuid's
|
||||||
|
// Do not abort the operation just ignore it, it could be a cipher was just deleted for example
|
||||||
|
if let Some(cipher) = Cipher::find_by_uuid_and_org(cipher_id, &data.organization_id, &mut conn).await {
|
||||||
|
if cipher.is_write_accessible_to_user(&headers.user.uuid, &mut conn).await {
|
||||||
|
for collection in &data.collection_ids {
|
||||||
|
CollectionCipher::save(&cipher.uuid, collection, &mut conn).await?;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
#[get("/organizations/<org_id>/policies")]
|
#[get("/organizations/<org_id>/policies")]
|
||||||
async fn list_policies(org_id: &str, _headers: AdminHeaders, mut conn: DbConn) -> Json<Value> {
|
async fn list_policies(org_id: &str, _headers: AdminHeaders, mut conn: DbConn) -> Json<Value> {
|
||||||
let policies = OrgPolicy::find_by_org(org_id, &mut conn).await;
|
let policies = OrgPolicy::find_by_org(org_id, &mut conn).await;
|
||||||
@@ -1642,7 +1713,14 @@ async fn list_policies(org_id: &str, _headers: AdminHeaders, mut conn: DbConn) -
|
|||||||
|
|
||||||
#[get("/organizations/<org_id>/policies/token?<token>")]
|
#[get("/organizations/<org_id>/policies/token?<token>")]
|
||||||
async fn list_policies_token(org_id: &str, token: &str, mut conn: DbConn) -> JsonResult {
|
async fn list_policies_token(org_id: &str, token: &str, mut conn: DbConn) -> JsonResult {
|
||||||
let invite = crate::auth::decode_invite(token)?;
|
// web-vault 2024.6.2 seems to send these values and cause logs to output errors
|
||||||
|
// Catch this and prevent errors in the logs
|
||||||
|
// TODO: CleanUp after 2024.6.x is not used anymore.
|
||||||
|
if org_id == "undefined" && token == "undefined" {
|
||||||
|
return Ok(Json(json!({})));
|
||||||
|
}
|
||||||
|
|
||||||
|
let invite = decode_invite(token)?;
|
||||||
|
|
||||||
let invite_org_id = match invite.org_id {
|
let invite_org_id = match invite.org_id {
|
||||||
Some(invite_org_id) => invite_org_id,
|
Some(invite_org_id) => invite_org_id,
|
||||||
@@ -1702,6 +1780,38 @@ async fn put_policy(
|
|||||||
None => err!("Invalid or unsupported policy type"),
|
None => err!("Invalid or unsupported policy type"),
|
||||||
};
|
};
|
||||||
|
|
||||||
|
// Bitwarden only allows the Reset Password policy when Single Org policy is enabled
|
||||||
|
// Vaultwarden encouraged to use multiple orgs instead of groups because groups were not available in the past
|
||||||
|
// Now that groups are available we can enforce this option when wanted.
|
||||||
|
// We put this behind a config option to prevent breaking current installation.
|
||||||
|
// Maybe we want to enable this by default in the future, but currently it is disabled by default.
|
||||||
|
if CONFIG.enforce_single_org_with_reset_pw_policy() {
|
||||||
|
if pol_type_enum == OrgPolicyType::ResetPassword && data.enabled {
|
||||||
|
let single_org_policy_enabled =
|
||||||
|
match OrgPolicy::find_by_org_and_type(org_id, OrgPolicyType::SingleOrg, &mut conn).await {
|
||||||
|
Some(p) => p.enabled,
|
||||||
|
None => false,
|
||||||
|
};
|
||||||
|
|
||||||
|
if !single_org_policy_enabled {
|
||||||
|
err!("Single Organization policy is not enabled. It is mandatory for this policy to be enabled.")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Also prevent the Single Org Policy to be disabled if the Reset Password policy is enabled
|
||||||
|
if pol_type_enum == OrgPolicyType::SingleOrg && !data.enabled {
|
||||||
|
let reset_pw_policy_enabled =
|
||||||
|
match OrgPolicy::find_by_org_and_type(org_id, OrgPolicyType::ResetPassword, &mut conn).await {
|
||||||
|
Some(p) => p.enabled,
|
||||||
|
None => false,
|
||||||
|
};
|
||||||
|
|
||||||
|
if reset_pw_policy_enabled {
|
||||||
|
err!("Account recovery policy is enabled. It is not allowed to disable this policy.")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// When enabling the TwoFactorAuthentication policy, revoke all members that do not have 2FA
|
// When enabling the TwoFactorAuthentication policy, revoke all members that do not have 2FA
|
||||||
if pol_type_enum == OrgPolicyType::TwoFactorAuthentication && data.enabled {
|
if pol_type_enum == OrgPolicyType::TwoFactorAuthentication && data.enabled {
|
||||||
two_factor::enforce_2fa_policy_for_org(
|
two_factor::enforce_2fa_policy_for_org(
|
||||||
@@ -1925,8 +2035,7 @@ async fn import(org_id: &str, data: Json<OrgImportData>, headers: Headers, mut c
|
|||||||
};
|
};
|
||||||
|
|
||||||
mail::send_invite(
|
mail::send_invite(
|
||||||
&user_data.email,
|
&user,
|
||||||
&user.uuid,
|
|
||||||
Some(String::from(org_id)),
|
Some(String::from(org_id)),
|
||||||
Some(new_org_user.uuid),
|
Some(new_org_user.uuid),
|
||||||
&org_name,
|
&org_name,
|
||||||
@@ -2196,13 +2305,14 @@ async fn _restore_organization_user(
|
|||||||
}
|
}
|
||||||
|
|
||||||
#[get("/organizations/<org_id>/groups")]
|
#[get("/organizations/<org_id>/groups")]
|
||||||
async fn get_groups(org_id: &str, _headers: ManagerHeadersLoose, mut conn: DbConn) -> JsonResult {
|
async fn get_groups(org_id: &str, headers: ManagerHeadersLoose, mut conn: DbConn) -> JsonResult {
|
||||||
let groups: Vec<Value> = if CONFIG.org_groups_enabled() {
|
let groups: Vec<Value> = if CONFIG.org_groups_enabled() {
|
||||||
// Group::find_by_organization(&org_id, &mut conn).await.iter().map(Group::to_json).collect::<Value>()
|
// Group::find_by_organization(&org_id, &mut conn).await.iter().map(Group::to_json).collect::<Value>()
|
||||||
let groups = Group::find_by_organization(org_id, &mut conn).await;
|
let groups = Group::find_by_organization(org_id, &mut conn).await;
|
||||||
let mut groups_json = Vec::with_capacity(groups.len());
|
let mut groups_json = Vec::with_capacity(groups.len());
|
||||||
|
|
||||||
for g in groups {
|
for g in groups {
|
||||||
groups_json.push(g.to_json_details(&mut conn).await)
|
groups_json.push(g.to_json_details(&headers.org_user.atype, &mut conn).await)
|
||||||
}
|
}
|
||||||
groups_json
|
groups_json
|
||||||
} else {
|
} else {
|
||||||
@@ -2222,7 +2332,8 @@ async fn get_groups(org_id: &str, _headers: ManagerHeadersLoose, mut conn: DbCon
|
|||||||
#[serde(rename_all = "camelCase")]
|
#[serde(rename_all = "camelCase")]
|
||||||
struct GroupRequest {
|
struct GroupRequest {
|
||||||
name: String,
|
name: String,
|
||||||
access_all: Option<bool>,
|
#[serde(default)]
|
||||||
|
access_all: bool,
|
||||||
external_id: Option<String>,
|
external_id: Option<String>,
|
||||||
collections: Vec<SelectionReadOnly>,
|
collections: Vec<SelectionReadOnly>,
|
||||||
users: Vec<String>,
|
users: Vec<String>,
|
||||||
@@ -2230,17 +2341,12 @@ struct GroupRequest {
|
|||||||
|
|
||||||
impl GroupRequest {
|
impl GroupRequest {
|
||||||
pub fn to_group(&self, organizations_uuid: &str) -> Group {
|
pub fn to_group(&self, organizations_uuid: &str) -> Group {
|
||||||
Group::new(
|
Group::new(String::from(organizations_uuid), self.name.clone(), self.access_all, self.external_id.clone())
|
||||||
String::from(organizations_uuid),
|
|
||||||
self.name.clone(),
|
|
||||||
self.access_all.unwrap_or(false),
|
|
||||||
self.external_id.clone(),
|
|
||||||
)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn update_group(&self, mut group: Group) -> Group {
|
pub fn update_group(&self, mut group: Group) -> Group {
|
||||||
group.name.clone_from(&self.name);
|
group.name.clone_from(&self.name);
|
||||||
group.access_all = self.access_all.unwrap_or(false);
|
group.access_all = self.access_all;
|
||||||
// Group Updates do not support changing the external_id
|
// Group Updates do not support changing the external_id
|
||||||
// These input fields are in a disabled state, and can only be updated/added via ldap_import
|
// These input fields are in a disabled state, and can only be updated/added via ldap_import
|
||||||
|
|
||||||
@@ -2394,7 +2500,7 @@ async fn add_update_group(
|
|||||||
}
|
}
|
||||||
|
|
||||||
#[get("/organizations/<_org_id>/groups/<group_id>/details")]
|
#[get("/organizations/<_org_id>/groups/<group_id>/details")]
|
||||||
async fn get_group_details(_org_id: &str, group_id: &str, _headers: AdminHeaders, mut conn: DbConn) -> JsonResult {
|
async fn get_group_details(_org_id: &str, group_id: &str, headers: AdminHeaders, mut conn: DbConn) -> JsonResult {
|
||||||
if !CONFIG.org_groups_enabled() {
|
if !CONFIG.org_groups_enabled() {
|
||||||
err!("Group support is disabled");
|
err!("Group support is disabled");
|
||||||
}
|
}
|
||||||
@@ -2404,7 +2510,7 @@ async fn get_group_details(_org_id: &str, group_id: &str, _headers: AdminHeaders
|
|||||||
_ => err!("Group could not be found!"),
|
_ => err!("Group could not be found!"),
|
||||||
};
|
};
|
||||||
|
|
||||||
Ok(Json(group.to_json_details(&mut conn).await))
|
Ok(Json(group.to_json_details(&(headers.org_user_type as i32), &mut conn).await))
|
||||||
}
|
}
|
||||||
|
|
||||||
#[post("/organizations/<org_id>/groups/<group_id>/delete")]
|
#[post("/organizations/<org_id>/groups/<group_id>/delete")]
|
||||||
@@ -2680,20 +2786,29 @@ struct OrganizationUserResetPasswordRequest {
|
|||||||
key: String,
|
key: String,
|
||||||
}
|
}
|
||||||
|
|
||||||
#[get("/organizations/<org_id>/keys")]
|
// Upstrem reports this is the renamed endpoint instead of `/keys`
|
||||||
async fn get_organization_keys(org_id: &str, mut conn: DbConn) -> JsonResult {
|
// But the clients do not seem to use this at all
|
||||||
|
// Just add it here in case they will
|
||||||
|
#[get("/organizations/<org_id>/public-key")]
|
||||||
|
async fn get_organization_public_key(org_id: &str, _headers: Headers, mut conn: DbConn) -> JsonResult {
|
||||||
let org = match Organization::find_by_uuid(org_id, &mut conn).await {
|
let org = match Organization::find_by_uuid(org_id, &mut conn).await {
|
||||||
Some(organization) => organization,
|
Some(organization) => organization,
|
||||||
None => err!("Organization not found"),
|
None => err!("Organization not found"),
|
||||||
};
|
};
|
||||||
|
|
||||||
Ok(Json(json!({
|
Ok(Json(json!({
|
||||||
"object": "organizationKeys",
|
"object": "organizationPublicKey",
|
||||||
"publicKey": org.public_key,
|
"publicKey": org.public_key,
|
||||||
"privateKey": org.private_key,
|
|
||||||
})))
|
})))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Obsolete - Renamed to public-key (2023.8), left for backwards compatibility with older clients
|
||||||
|
// https://github.com/bitwarden/server/blob/25dc0c9178e3e3584074bbef0d4be827b7c89415/src/Api/AdminConsole/Controllers/OrganizationsController.cs#L463-L468
|
||||||
|
#[get("/organizations/<org_id>/keys")]
|
||||||
|
async fn get_organization_keys(org_id: &str, headers: Headers, conn: DbConn) -> JsonResult {
|
||||||
|
get_organization_public_key(org_id, headers, conn).await
|
||||||
|
}
|
||||||
|
|
||||||
#[put("/organizations/<org_id>/users/<org_user_id>/reset-password", data = "<data>")]
|
#[put("/organizations/<org_id>/users/<org_user_id>/reset-password", data = "<data>")]
|
||||||
async fn put_reset_password(
|
async fn put_reset_password(
|
||||||
org_id: &str,
|
org_id: &str,
|
||||||
|
@@ -1,6 +1,6 @@
|
|||||||
use chrono::Utc;
|
use chrono::Utc;
|
||||||
use rocket::{
|
use rocket::{
|
||||||
request::{self, FromRequest, Outcome},
|
request::{FromRequest, Outcome},
|
||||||
serde::json::Json,
|
serde::json::Json,
|
||||||
Request, Route,
|
Request, Route,
|
||||||
};
|
};
|
||||||
@@ -123,15 +123,8 @@ async fn ldap_import(data: Json<OrgImportData>, token: PublicToken, mut conn: Db
|
|||||||
None => err!("Error looking up organization"),
|
None => err!("Error looking up organization"),
|
||||||
};
|
};
|
||||||
|
|
||||||
mail::send_invite(
|
mail::send_invite(&user, Some(org_id.clone()), Some(new_org_user.uuid), &org_name, Some(org_email))
|
||||||
&user_data.email,
|
.await?;
|
||||||
&user.uuid,
|
|
||||||
Some(org_id.clone()),
|
|
||||||
Some(new_org_user.uuid),
|
|
||||||
&org_name,
|
|
||||||
Some(org_email),
|
|
||||||
)
|
|
||||||
.await?;
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -199,7 +192,7 @@ pub struct PublicToken(String);
|
|||||||
impl<'r> FromRequest<'r> for PublicToken {
|
impl<'r> FromRequest<'r> for PublicToken {
|
||||||
type Error = &'static str;
|
type Error = &'static str;
|
||||||
|
|
||||||
async fn from_request(request: &'r Request<'_>) -> request::Outcome<Self, Self::Error> {
|
async fn from_request(request: &'r Request<'_>) -> Outcome<Self, Self::Error> {
|
||||||
let headers = request.headers();
|
let headers = request.headers();
|
||||||
// Get access_token
|
// Get access_token
|
||||||
let access_token: &str = match headers.get_one("Authorization") {
|
let access_token: &str = match headers.get_one("Authorization") {
|
||||||
|
@@ -349,7 +349,15 @@ async fn post_send_file_v2(data: Json<SendData>, headers: Headers, mut conn: DbC
|
|||||||
})))
|
})))
|
||||||
}
|
}
|
||||||
|
|
||||||
// https://github.com/bitwarden/server/blob/d0c793c95181dfb1b447eb450f85ba0bfd7ef643/src/Api/Controllers/SendsController.cs#L243
|
#[derive(Deserialize)]
|
||||||
|
#[allow(non_snake_case)]
|
||||||
|
pub struct SendFileData {
|
||||||
|
id: String,
|
||||||
|
size: u64,
|
||||||
|
fileName: String,
|
||||||
|
}
|
||||||
|
|
||||||
|
// https://github.com/bitwarden/server/blob/66f95d1c443490b653e5a15d32977e2f5a3f9e32/src/Api/Tools/Controllers/SendsController.cs#L250
|
||||||
#[post("/sends/<send_uuid>/file/<file_id>", format = "multipart/form-data", data = "<data>")]
|
#[post("/sends/<send_uuid>/file/<file_id>", format = "multipart/form-data", data = "<data>")]
|
||||||
async fn post_send_file_v2_data(
|
async fn post_send_file_v2_data(
|
||||||
send_uuid: &str,
|
send_uuid: &str,
|
||||||
@@ -367,15 +375,55 @@ async fn post_send_file_v2_data(
|
|||||||
err!("Send not found. Unable to save the file.")
|
err!("Send not found. Unable to save the file.")
|
||||||
};
|
};
|
||||||
|
|
||||||
|
if send.atype != SendType::File as i32 {
|
||||||
|
err!("Send is not a file type send.");
|
||||||
|
}
|
||||||
|
|
||||||
let Some(send_user_id) = &send.user_uuid else {
|
let Some(send_user_id) = &send.user_uuid else {
|
||||||
err!("Sends are only supported for users at the moment")
|
err!("Sends are only supported for users at the moment.")
|
||||||
};
|
};
|
||||||
|
|
||||||
if send_user_id != &headers.user.uuid {
|
if send_user_id != &headers.user.uuid {
|
||||||
err!("Send doesn't belong to user");
|
err!("Send doesn't belong to user.");
|
||||||
|
}
|
||||||
|
|
||||||
|
let Ok(send_data) = serde_json::from_str::<SendFileData>(&send.data) else {
|
||||||
|
err!("Unable to decode send data as json.")
|
||||||
|
};
|
||||||
|
|
||||||
|
match data.data.raw_name() {
|
||||||
|
Some(raw_file_name) if raw_file_name.dangerous_unsafe_unsanitized_raw() == send_data.fileName => (),
|
||||||
|
Some(raw_file_name) => err!(
|
||||||
|
"Send file name does not match.",
|
||||||
|
format!(
|
||||||
|
"Expected file name '{}' got '{}'",
|
||||||
|
send_data.fileName,
|
||||||
|
raw_file_name.dangerous_unsafe_unsanitized_raw()
|
||||||
|
)
|
||||||
|
),
|
||||||
|
_ => err!("Send file name does not match or is not provided."),
|
||||||
|
}
|
||||||
|
|
||||||
|
if file_id != send_data.id {
|
||||||
|
err!("Send file does not match send data.", format!("Expected id {} got {file_id}", send_data.id));
|
||||||
|
}
|
||||||
|
|
||||||
|
let Some(size) = data.data.len().to_u64() else {
|
||||||
|
err!("Send file size overflow.");
|
||||||
|
};
|
||||||
|
|
||||||
|
if size != send_data.size {
|
||||||
|
err!("Send file size does not match.", format!("Expected a file size of {} got {size}", send_data.size));
|
||||||
}
|
}
|
||||||
|
|
||||||
let folder_path = tokio::fs::canonicalize(&CONFIG.sends_folder()).await?.join(send_uuid);
|
let folder_path = tokio::fs::canonicalize(&CONFIG.sends_folder()).await?.join(send_uuid);
|
||||||
let file_path = folder_path.join(file_id);
|
let file_path = folder_path.join(file_id);
|
||||||
|
|
||||||
|
// Check if the file already exists, if that is the case do not overwrite it
|
||||||
|
if tokio::fs::metadata(&file_path).await.is_ok() {
|
||||||
|
err!("Send file has already been uploaded.", format!("File {file_path:?} already exists"))
|
||||||
|
}
|
||||||
|
|
||||||
tokio::fs::create_dir_all(&folder_path).await?;
|
tokio::fs::create_dir_all(&folder_path).await?;
|
||||||
|
|
||||||
if let Err(_err) = data.data.persist_to(&file_path).await {
|
if let Err(_err) = data.data.persist_to(&file_path).await {
|
||||||
|
@@ -15,7 +15,7 @@ use crate::{
|
|||||||
DbConn,
|
DbConn,
|
||||||
},
|
},
|
||||||
error::MapResult,
|
error::MapResult,
|
||||||
util::get_reqwest_client,
|
http_client::make_http_request,
|
||||||
CONFIG,
|
CONFIG,
|
||||||
};
|
};
|
||||||
|
|
||||||
@@ -210,10 +210,7 @@ async fn duo_api_request(method: &str, path: &str, params: &str, data: &DuoData)
|
|||||||
|
|
||||||
let m = Method::from_str(method).unwrap_or_default();
|
let m = Method::from_str(method).unwrap_or_default();
|
||||||
|
|
||||||
let client = get_reqwest_client();
|
make_http_request(m, &url)?
|
||||||
|
|
||||||
client
|
|
||||||
.request(m, &url)
|
|
||||||
.basic_auth(username, Some(password))
|
.basic_auth(username, Some(password))
|
||||||
.header(header::USER_AGENT, "vaultwarden:Duo/1.0 (Rust)")
|
.header(header::USER_AGENT, "vaultwarden:Duo/1.0 (Rust)")
|
||||||
.header(header::DATE, date)
|
.header(header::DATE, date)
|
||||||
@@ -255,7 +252,7 @@ async fn get_user_duo_data(uuid: &str, conn: &mut DbConn) -> DuoStatus {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// let (ik, sk, ak, host) = get_duo_keys();
|
// let (ik, sk, ak, host) = get_duo_keys();
|
||||||
async fn get_duo_keys_email(email: &str, conn: &mut DbConn) -> ApiResult<(String, String, String, String)> {
|
pub(crate) async fn get_duo_keys_email(email: &str, conn: &mut DbConn) -> ApiResult<(String, String, String, String)> {
|
||||||
let data = match User::find_by_mail(email, conn).await {
|
let data = match User::find_by_mail(email, conn).await {
|
||||||
Some(u) => get_user_duo_data(&u.uuid, conn).await.data(),
|
Some(u) => get_user_duo_data(&u.uuid, conn).await.data(),
|
||||||
_ => DuoData::global(),
|
_ => DuoData::global(),
|
||||||
@@ -284,10 +281,6 @@ fn sign_duo_values(key: &str, email: &str, ikey: &str, prefix: &str, expire: i64
|
|||||||
}
|
}
|
||||||
|
|
||||||
pub async fn validate_duo_login(email: &str, response: &str, conn: &mut DbConn) -> EmptyResult {
|
pub async fn validate_duo_login(email: &str, response: &str, conn: &mut DbConn) -> EmptyResult {
|
||||||
// email is as entered by the user, so it needs to be normalized before
|
|
||||||
// comparison with auth_user below.
|
|
||||||
let email = &email.to_lowercase();
|
|
||||||
|
|
||||||
let split: Vec<&str> = response.split(':').collect();
|
let split: Vec<&str> = response.split(':').collect();
|
||||||
if split.len() != 2 {
|
if split.len() != 2 {
|
||||||
err!(
|
err!(
|
||||||
|
498
src/api/core/two_factor/duo_oidc.rs
Normal file
498
src/api/core/two_factor/duo_oidc.rs
Normal file
@@ -0,0 +1,498 @@
|
|||||||
|
use chrono::Utc;
|
||||||
|
use data_encoding::HEXLOWER;
|
||||||
|
use jsonwebtoken::{Algorithm, DecodingKey, EncodingKey, Header, Validation};
|
||||||
|
use reqwest::{header, StatusCode};
|
||||||
|
use ring::digest::{digest, Digest, SHA512_256};
|
||||||
|
use serde::Serialize;
|
||||||
|
use std::collections::HashMap;
|
||||||
|
|
||||||
|
use crate::{
|
||||||
|
api::{core::two_factor::duo::get_duo_keys_email, EmptyResult},
|
||||||
|
crypto,
|
||||||
|
db::{
|
||||||
|
models::{EventType, TwoFactorDuoContext},
|
||||||
|
DbConn, DbPool,
|
||||||
|
},
|
||||||
|
error::Error,
|
||||||
|
http_client::make_http_request,
|
||||||
|
CONFIG,
|
||||||
|
};
|
||||||
|
use url::Url;
|
||||||
|
|
||||||
|
// The location on this service that Duo should redirect users to. For us, this is a bridge
|
||||||
|
// built in to the Bitwarden clients.
|
||||||
|
// See: https://github.com/bitwarden/clients/blob/main/apps/web/src/connectors/duo-redirect.ts
|
||||||
|
const DUO_REDIRECT_LOCATION: &str = "duo-redirect-connector.html";
|
||||||
|
|
||||||
|
// Number of seconds that a JWT we generate for Duo should be valid for.
|
||||||
|
const JWT_VALIDITY_SECS: i64 = 300;
|
||||||
|
|
||||||
|
// Number of seconds that a Duo context stored in the database should be valid for.
|
||||||
|
const CTX_VALIDITY_SECS: i64 = 300;
|
||||||
|
|
||||||
|
// Expected algorithm used by Duo to sign JWTs.
|
||||||
|
const DUO_RESP_SIGNATURE_ALG: Algorithm = Algorithm::HS512;
|
||||||
|
|
||||||
|
// Signature algorithm we're using to sign JWTs for Duo. Must be either HS512 or HS256.
|
||||||
|
const JWT_SIGNATURE_ALG: Algorithm = Algorithm::HS512;
|
||||||
|
|
||||||
|
// Size of random strings for state and nonce. Must be at least 16 characters and at most 1024 characters.
|
||||||
|
// If increasing this above 64, also increase the size of the twofactor_duo_ctx.state and
|
||||||
|
// twofactor_duo_ctx.nonce database columns for postgres and mariadb.
|
||||||
|
const STATE_LENGTH: usize = 64;
|
||||||
|
|
||||||
|
// client_assertion payload for health checks and obtaining MFA results.
|
||||||
|
#[derive(Debug, Serialize, Deserialize)]
|
||||||
|
struct ClientAssertion {
|
||||||
|
pub iss: String,
|
||||||
|
pub sub: String,
|
||||||
|
pub aud: String,
|
||||||
|
pub exp: i64,
|
||||||
|
pub jti: String,
|
||||||
|
pub iat: i64,
|
||||||
|
}
|
||||||
|
|
||||||
|
// authorization request payload sent with clients to Duo for MFA
|
||||||
|
#[derive(Debug, Serialize, Deserialize)]
|
||||||
|
struct AuthorizationRequest {
|
||||||
|
pub response_type: String,
|
||||||
|
pub scope: String,
|
||||||
|
pub exp: i64,
|
||||||
|
pub client_id: String,
|
||||||
|
pub redirect_uri: String,
|
||||||
|
pub state: String,
|
||||||
|
pub duo_uname: String,
|
||||||
|
pub iss: String,
|
||||||
|
pub aud: String,
|
||||||
|
pub nonce: String,
|
||||||
|
}
|
||||||
|
|
||||||
|
// Duo service health check responses
|
||||||
|
#[derive(Debug, Serialize, Deserialize)]
|
||||||
|
#[serde(untagged)]
|
||||||
|
enum HealthCheckResponse {
|
||||||
|
HealthOK {
|
||||||
|
stat: String,
|
||||||
|
},
|
||||||
|
HealthFail {
|
||||||
|
message: String,
|
||||||
|
message_detail: String,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
// Outer structure of response when exchanging authz code for MFA results
|
||||||
|
#[derive(Debug, Serialize, Deserialize)]
|
||||||
|
struct IdTokenResponse {
|
||||||
|
id_token: String, // IdTokenClaims
|
||||||
|
access_token: String,
|
||||||
|
expires_in: i64,
|
||||||
|
token_type: String,
|
||||||
|
}
|
||||||
|
|
||||||
|
// Inner structure of IdTokenResponse.id_token
|
||||||
|
#[derive(Debug, Serialize, Deserialize)]
|
||||||
|
struct IdTokenClaims {
|
||||||
|
preferred_username: String,
|
||||||
|
nonce: String,
|
||||||
|
}
|
||||||
|
|
||||||
|
// Duo OIDC Authorization Client
|
||||||
|
// See https://duo.com/docs/oauthapi
|
||||||
|
struct DuoClient {
|
||||||
|
client_id: String, // Duo Client ID (DuoData.ik)
|
||||||
|
client_secret: String, // Duo Client Secret (DuoData.sk)
|
||||||
|
api_host: String, // Duo API hostname (DuoData.host)
|
||||||
|
redirect_uri: String, // URL in this application clients should call for MFA verification
|
||||||
|
}
|
||||||
|
|
||||||
|
impl DuoClient {
|
||||||
|
// Construct a new DuoClient
|
||||||
|
fn new(client_id: String, client_secret: String, api_host: String, redirect_uri: String) -> DuoClient {
|
||||||
|
DuoClient {
|
||||||
|
client_id,
|
||||||
|
client_secret,
|
||||||
|
api_host,
|
||||||
|
redirect_uri,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Generate a client assertion for health checks and authorization code exchange.
|
||||||
|
fn new_client_assertion(&self, url: &str) -> ClientAssertion {
|
||||||
|
let now = Utc::now().timestamp();
|
||||||
|
let jwt_id = crypto::get_random_string_alphanum(STATE_LENGTH);
|
||||||
|
|
||||||
|
ClientAssertion {
|
||||||
|
iss: self.client_id.clone(),
|
||||||
|
sub: self.client_id.clone(),
|
||||||
|
aud: url.to_string(),
|
||||||
|
exp: now + JWT_VALIDITY_SECS,
|
||||||
|
jti: jwt_id,
|
||||||
|
iat: now,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Given a serde-serializable struct, attempt to encode it as a JWT
|
||||||
|
fn encode_duo_jwt<T: Serialize>(&self, jwt_payload: T) -> Result<String, Error> {
|
||||||
|
match jsonwebtoken::encode(
|
||||||
|
&Header::new(JWT_SIGNATURE_ALG),
|
||||||
|
&jwt_payload,
|
||||||
|
&EncodingKey::from_secret(self.client_secret.as_bytes()),
|
||||||
|
) {
|
||||||
|
Ok(token) => Ok(token),
|
||||||
|
Err(e) => err!(format!("Error encoding Duo JWT: {e:?}")),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// "required" health check to verify the integration is configured and Duo's services
|
||||||
|
// are up.
|
||||||
|
// https://duo.com/docs/oauthapi#health-check
|
||||||
|
async fn health_check(&self) -> Result<(), Error> {
|
||||||
|
let health_check_url: String = format!("https://{}/oauth/v1/health_check", self.api_host);
|
||||||
|
|
||||||
|
let jwt_payload = self.new_client_assertion(&health_check_url);
|
||||||
|
|
||||||
|
let token = match self.encode_duo_jwt(jwt_payload) {
|
||||||
|
Ok(token) => token,
|
||||||
|
Err(e) => return Err(e),
|
||||||
|
};
|
||||||
|
|
||||||
|
let mut post_body = HashMap::new();
|
||||||
|
post_body.insert("client_assertion", token);
|
||||||
|
post_body.insert("client_id", self.client_id.clone());
|
||||||
|
|
||||||
|
let res = match make_http_request(reqwest::Method::POST, &health_check_url)?
|
||||||
|
.header(header::USER_AGENT, "vaultwarden:Duo/2.0 (Rust)")
|
||||||
|
.form(&post_body)
|
||||||
|
.send()
|
||||||
|
.await
|
||||||
|
{
|
||||||
|
Ok(r) => r,
|
||||||
|
Err(e) => err!(format!("Error requesting Duo health check: {e:?}")),
|
||||||
|
};
|
||||||
|
|
||||||
|
let response: HealthCheckResponse = match res.json::<HealthCheckResponse>().await {
|
||||||
|
Ok(r) => r,
|
||||||
|
Err(e) => err!(format!("Duo health check response decode error: {e:?}")),
|
||||||
|
};
|
||||||
|
|
||||||
|
let health_stat: String = match response {
|
||||||
|
HealthCheckResponse::HealthOK {
|
||||||
|
stat,
|
||||||
|
} => stat,
|
||||||
|
HealthCheckResponse::HealthFail {
|
||||||
|
message,
|
||||||
|
message_detail,
|
||||||
|
} => err!(format!("Duo health check FAIL response, msg: {}, detail: {}", message, message_detail)),
|
||||||
|
};
|
||||||
|
|
||||||
|
if health_stat != "OK" {
|
||||||
|
err!(format!("Duo health check failed, got OK-like body with stat {health_stat}"));
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
// Constructs the URL for the authorization request endpoint on Duo's service.
|
||||||
|
// Clients are sent here to continue authentication.
|
||||||
|
// https://duo.com/docs/oauthapi#authorization-request
|
||||||
|
fn make_authz_req_url(&self, duo_username: &str, state: String, nonce: String) -> Result<String, Error> {
|
||||||
|
let now = Utc::now().timestamp();
|
||||||
|
|
||||||
|
let jwt_payload = AuthorizationRequest {
|
||||||
|
response_type: String::from("code"),
|
||||||
|
scope: String::from("openid"),
|
||||||
|
exp: now + JWT_VALIDITY_SECS,
|
||||||
|
client_id: self.client_id.clone(),
|
||||||
|
redirect_uri: self.redirect_uri.clone(),
|
||||||
|
state,
|
||||||
|
duo_uname: String::from(duo_username),
|
||||||
|
iss: self.client_id.clone(),
|
||||||
|
aud: format!("https://{}", self.api_host),
|
||||||
|
nonce,
|
||||||
|
};
|
||||||
|
|
||||||
|
let token = match self.encode_duo_jwt(jwt_payload) {
|
||||||
|
Ok(token) => token,
|
||||||
|
Err(e) => return Err(e),
|
||||||
|
};
|
||||||
|
|
||||||
|
let authz_endpoint = format!("https://{}/oauth/v1/authorize", self.api_host);
|
||||||
|
let mut auth_url = match Url::parse(authz_endpoint.as_str()) {
|
||||||
|
Ok(url) => url,
|
||||||
|
Err(e) => err!(format!("Error parsing Duo authorization URL: {e:?}")),
|
||||||
|
};
|
||||||
|
|
||||||
|
{
|
||||||
|
let mut query_params = auth_url.query_pairs_mut();
|
||||||
|
query_params.append_pair("response_type", "code");
|
||||||
|
query_params.append_pair("client_id", self.client_id.as_str());
|
||||||
|
query_params.append_pair("request", token.as_str());
|
||||||
|
}
|
||||||
|
|
||||||
|
let final_auth_url = auth_url.to_string();
|
||||||
|
Ok(final_auth_url)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Exchange the authorization code obtained from an access token provided by the user
|
||||||
|
// for the result of the MFA and validate.
|
||||||
|
// See: https://duo.com/docs/oauthapi#access-token (under Response Format)
|
||||||
|
async fn exchange_authz_code_for_result(
|
||||||
|
&self,
|
||||||
|
duo_code: &str,
|
||||||
|
duo_username: &str,
|
||||||
|
nonce: &str,
|
||||||
|
) -> Result<(), Error> {
|
||||||
|
if duo_code.is_empty() {
|
||||||
|
err!("Empty Duo authorization code")
|
||||||
|
}
|
||||||
|
|
||||||
|
let token_url = format!("https://{}/oauth/v1/token", self.api_host);
|
||||||
|
|
||||||
|
let jwt_payload = self.new_client_assertion(&token_url);
|
||||||
|
|
||||||
|
let token = match self.encode_duo_jwt(jwt_payload) {
|
||||||
|
Ok(token) => token,
|
||||||
|
Err(e) => return Err(e),
|
||||||
|
};
|
||||||
|
|
||||||
|
let mut post_body = HashMap::new();
|
||||||
|
post_body.insert("grant_type", String::from("authorization_code"));
|
||||||
|
post_body.insert("code", String::from(duo_code));
|
||||||
|
|
||||||
|
// Must be the same URL that was supplied in the authorization request for the supplied duo_code
|
||||||
|
post_body.insert("redirect_uri", self.redirect_uri.clone());
|
||||||
|
|
||||||
|
post_body
|
||||||
|
.insert("client_assertion_type", String::from("urn:ietf:params:oauth:client-assertion-type:jwt-bearer"));
|
||||||
|
post_body.insert("client_assertion", token);
|
||||||
|
|
||||||
|
let res = match make_http_request(reqwest::Method::POST, &token_url)?
|
||||||
|
.header(header::USER_AGENT, "vaultwarden:Duo/2.0 (Rust)")
|
||||||
|
.form(&post_body)
|
||||||
|
.send()
|
||||||
|
.await
|
||||||
|
{
|
||||||
|
Ok(r) => r,
|
||||||
|
Err(e) => err!(format!("Error exchanging Duo code: {e:?}")),
|
||||||
|
};
|
||||||
|
|
||||||
|
let status_code = res.status();
|
||||||
|
if status_code != StatusCode::OK {
|
||||||
|
err!(format!("Failure response from Duo: {}", status_code))
|
||||||
|
}
|
||||||
|
|
||||||
|
let response: IdTokenResponse = match res.json::<IdTokenResponse>().await {
|
||||||
|
Ok(r) => r,
|
||||||
|
Err(e) => err!(format!("Error decoding ID token response: {e:?}")),
|
||||||
|
};
|
||||||
|
|
||||||
|
let mut validation = Validation::new(DUO_RESP_SIGNATURE_ALG);
|
||||||
|
validation.set_required_spec_claims(&["exp", "aud", "iss"]);
|
||||||
|
validation.set_audience(&[&self.client_id]);
|
||||||
|
validation.set_issuer(&[token_url.as_str()]);
|
||||||
|
|
||||||
|
let token_data = match jsonwebtoken::decode::<IdTokenClaims>(
|
||||||
|
&response.id_token,
|
||||||
|
&DecodingKey::from_secret(self.client_secret.as_bytes()),
|
||||||
|
&validation,
|
||||||
|
) {
|
||||||
|
Ok(c) => c,
|
||||||
|
Err(e) => err!(format!("Failed to decode Duo token {e:?}")),
|
||||||
|
};
|
||||||
|
|
||||||
|
let matching_nonces = crypto::ct_eq(nonce, &token_data.claims.nonce);
|
||||||
|
let matching_usernames = crypto::ct_eq(duo_username, &token_data.claims.preferred_username);
|
||||||
|
|
||||||
|
if !(matching_nonces && matching_usernames) {
|
||||||
|
err!("Error validating Duo authorization, nonce or username mismatch.")
|
||||||
|
};
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// In-memory copy of a stored Duo authentication context, detached from the
// database row so the row can be deleted before the values are used.
struct DuoAuthContext {
    pub state: String,      // OAuth2 state the client must echo back
    pub user_email: String, // email the context was created for
    pub nonce: String,      // OIDC nonce bound to the authing device
    pub exp: i64,           // unix-timestamp expiry of this context
}
|
||||||
|
|
||||||
|
// Given a state string, retrieve the associated Duo auth context and
|
||||||
|
// delete the retrieved state from the database.
|
||||||
|
async fn extract_context(state: &str, conn: &mut DbConn) -> Option<DuoAuthContext> {
|
||||||
|
let ctx: TwoFactorDuoContext = match TwoFactorDuoContext::find_by_state(state, conn).await {
|
||||||
|
Some(c) => c,
|
||||||
|
None => return None,
|
||||||
|
};
|
||||||
|
|
||||||
|
if ctx.exp < Utc::now().timestamp() {
|
||||||
|
ctx.delete(conn).await.ok();
|
||||||
|
return None;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Copy the context data, so that we can delete the context from
|
||||||
|
// the database before returning.
|
||||||
|
let ret_ctx = DuoAuthContext {
|
||||||
|
state: ctx.state.clone(),
|
||||||
|
user_email: ctx.user_email.clone(),
|
||||||
|
nonce: ctx.nonce.clone(),
|
||||||
|
exp: ctx.exp,
|
||||||
|
};
|
||||||
|
|
||||||
|
ctx.delete(conn).await.ok();
|
||||||
|
Some(ret_ctx)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Task to clean up expired Duo authentication contexts that may have accumulated in the database.
|
||||||
|
pub async fn purge_duo_contexts(pool: DbPool) {
|
||||||
|
debug!("Purging Duo authentication contexts");
|
||||||
|
if let Ok(mut conn) = pool.get().await {
|
||||||
|
TwoFactorDuoContext::purge_expired_duo_contexts(&mut conn).await;
|
||||||
|
} else {
|
||||||
|
error!("Failed to get DB connection while purging expired Duo authentications")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Construct the url that Duo should redirect users to.
|
||||||
|
fn make_callback_url(client_name: &str) -> Result<String, Error> {
|
||||||
|
// Get the location of this application as defined in the config.
|
||||||
|
let base = match Url::parse(&format!("{}/", CONFIG.domain())) {
|
||||||
|
Ok(url) => url,
|
||||||
|
Err(e) => err!(format!("Error parsing configured domain URL (check your domain configuration): {e:?}")),
|
||||||
|
};
|
||||||
|
|
||||||
|
// Add the client redirect bridge location
|
||||||
|
let mut callback = match base.join(DUO_REDIRECT_LOCATION) {
|
||||||
|
Ok(url) => url,
|
||||||
|
Err(e) => err!(format!("Error constructing Duo redirect URL (check your domain configuration): {e:?}")),
|
||||||
|
};
|
||||||
|
|
||||||
|
// Add the 'client' string with the authenticating device type. The callback connector uses this
|
||||||
|
// information to figure out how it should handle certain clients.
|
||||||
|
{
|
||||||
|
let mut query_params = callback.query_pairs_mut();
|
||||||
|
query_params.append_pair("client", client_name);
|
||||||
|
}
|
||||||
|
Ok(callback.to_string())
|
||||||
|
}
|
||||||
|
|
||||||
|
// Pre-redirect first stage of the Duo OIDC authentication flow.
|
||||||
|
// Returns the "AuthUrl" that should be returned to clients for MFA.
|
||||||
|
pub async fn get_duo_auth_url(
|
||||||
|
email: &str,
|
||||||
|
client_id: &str,
|
||||||
|
device_identifier: &String,
|
||||||
|
conn: &mut DbConn,
|
||||||
|
) -> Result<String, Error> {
|
||||||
|
let (ik, sk, _, host) = get_duo_keys_email(email, conn).await?;
|
||||||
|
|
||||||
|
let callback_url = match make_callback_url(client_id) {
|
||||||
|
Ok(url) => url,
|
||||||
|
Err(e) => return Err(e),
|
||||||
|
};
|
||||||
|
|
||||||
|
let client = DuoClient::new(ik, sk, host, callback_url);
|
||||||
|
|
||||||
|
match client.health_check().await {
|
||||||
|
Ok(()) => {}
|
||||||
|
Err(e) => return Err(e),
|
||||||
|
};
|
||||||
|
|
||||||
|
// Generate random OAuth2 state and OIDC Nonce
|
||||||
|
let state: String = crypto::get_random_string_alphanum(STATE_LENGTH);
|
||||||
|
let nonce: String = crypto::get_random_string_alphanum(STATE_LENGTH);
|
||||||
|
|
||||||
|
// Bind the nonce to the device that's currently authing by hashing the nonce and device id
|
||||||
|
// and sending the result as the OIDC nonce.
|
||||||
|
let d: Digest = digest(&SHA512_256, format!("{nonce}{device_identifier}").as_bytes());
|
||||||
|
let hash: String = HEXLOWER.encode(d.as_ref());
|
||||||
|
|
||||||
|
match TwoFactorDuoContext::save(state.as_str(), email, nonce.as_str(), CTX_VALIDITY_SECS, conn).await {
|
||||||
|
Ok(()) => client.make_authz_req_url(email, state, hash),
|
||||||
|
Err(e) => err!(format!("Error saving Duo authentication context: {e:?}")),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Post-redirect second stage of the Duo OIDC authentication flow.
|
||||||
|
// Exchanges an authorization code for the MFA result with Duo's API and validates the result.
|
||||||
|
pub async fn validate_duo_login(
|
||||||
|
email: &str,
|
||||||
|
two_factor_token: &str,
|
||||||
|
client_id: &str,
|
||||||
|
device_identifier: &str,
|
||||||
|
conn: &mut DbConn,
|
||||||
|
) -> EmptyResult {
|
||||||
|
// Result supplied to us by clients in the form "<authz code>|<state>"
|
||||||
|
let split: Vec<&str> = two_factor_token.split('|').collect();
|
||||||
|
if split.len() != 2 {
|
||||||
|
err!(
|
||||||
|
"Invalid response length",
|
||||||
|
ErrorEvent {
|
||||||
|
event: EventType::UserFailedLogIn2fa
|
||||||
|
}
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
let code = split[0];
|
||||||
|
let state = split[1];
|
||||||
|
|
||||||
|
let (ik, sk, _, host) = get_duo_keys_email(email, conn).await?;
|
||||||
|
|
||||||
|
// Get the context by the state reported by the client. If we don't have one,
|
||||||
|
// it means the context is either missing or expired.
|
||||||
|
let ctx = match extract_context(state, conn).await {
|
||||||
|
Some(c) => c,
|
||||||
|
None => {
|
||||||
|
err!(
|
||||||
|
"Error validating duo authentication",
|
||||||
|
ErrorEvent {
|
||||||
|
event: EventType::UserFailedLogIn2fa
|
||||||
|
}
|
||||||
|
)
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
// Context validation steps
|
||||||
|
let matching_usernames = crypto::ct_eq(email, &ctx.user_email);
|
||||||
|
|
||||||
|
// Probably redundant, but we're double-checking them anyway.
|
||||||
|
let matching_states = crypto::ct_eq(state, &ctx.state);
|
||||||
|
let unexpired_context = ctx.exp > Utc::now().timestamp();
|
||||||
|
|
||||||
|
if !(matching_usernames && matching_states && unexpired_context) {
|
||||||
|
err!(
|
||||||
|
"Error validating duo authentication",
|
||||||
|
ErrorEvent {
|
||||||
|
event: EventType::UserFailedLogIn2fa
|
||||||
|
}
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
let callback_url = match make_callback_url(client_id) {
|
||||||
|
Ok(url) => url,
|
||||||
|
Err(e) => return Err(e),
|
||||||
|
};
|
||||||
|
|
||||||
|
let client = DuoClient::new(ik, sk, host, callback_url);
|
||||||
|
|
||||||
|
match client.health_check().await {
|
||||||
|
Ok(()) => {}
|
||||||
|
Err(e) => return Err(e),
|
||||||
|
};
|
||||||
|
|
||||||
|
let d: Digest = digest(&SHA512_256, format!("{}{}", ctx.nonce, device_identifier).as_bytes());
|
||||||
|
let hash: String = HEXLOWER.encode(d.as_ref());
|
||||||
|
|
||||||
|
match client.exchange_authz_code_for_result(code, email, hash.as_str()).await {
|
||||||
|
Ok(_) => Ok(()),
|
||||||
|
Err(_) => {
|
||||||
|
err!(
|
||||||
|
"Error validating duo authentication",
|
||||||
|
ErrorEvent {
|
||||||
|
event: EventType::UserFailedLogIn2fa
|
||||||
|
}
|
||||||
|
)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
@@ -24,7 +24,10 @@ pub fn routes() -> Vec<Route> {
|
|||||||
#[derive(Deserialize)]
|
#[derive(Deserialize)]
|
||||||
#[serde(rename_all = "camelCase")]
|
#[serde(rename_all = "camelCase")]
|
||||||
struct SendEmailLoginData {
|
struct SendEmailLoginData {
|
||||||
|
// DeviceIdentifier: String, // Currently not used
|
||||||
|
#[serde(alias = "Email")]
|
||||||
email: String,
|
email: String,
|
||||||
|
#[serde(alias = "MasterPasswordHash")]
|
||||||
master_password_hash: String,
|
master_password_hash: String,
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -289,7 +292,7 @@ impl EmailTokenData {
|
|||||||
}
|
}
|
||||||
|
|
||||||
pub fn from_json(string: &str) -> Result<EmailTokenData, Error> {
|
pub fn from_json(string: &str) -> Result<EmailTokenData, Error> {
|
||||||
let res: Result<EmailTokenData, crate::serde_json::Error> = serde_json::from_str(string);
|
let res: Result<EmailTokenData, serde_json::Error> = serde_json::from_str(string);
|
||||||
match res {
|
match res {
|
||||||
Ok(x) => Ok(x),
|
Ok(x) => Ok(x),
|
||||||
Err(_) => err!("Could not decode EmailTokenData from string"),
|
Err(_) => err!("Could not decode EmailTokenData from string"),
|
||||||
|
@@ -19,6 +19,7 @@ use crate::{
|
|||||||
|
|
||||||
pub mod authenticator;
|
pub mod authenticator;
|
||||||
pub mod duo;
|
pub mod duo;
|
||||||
|
pub mod duo_oidc;
|
||||||
pub mod email;
|
pub mod email;
|
||||||
pub mod protected_actions;
|
pub mod protected_actions;
|
||||||
pub mod webauthn;
|
pub mod webauthn;
|
||||||
@@ -268,10 +269,24 @@ pub async fn send_incomplete_2fa_notifications(pool: DbPool) {
|
|||||||
"User {} did not complete a 2FA login within the configured time limit. IP: {}",
|
"User {} did not complete a 2FA login within the configured time limit. IP: {}",
|
||||||
user.email, login.ip_address
|
user.email, login.ip_address
|
||||||
);
|
);
|
||||||
mail::send_incomplete_2fa_login(&user.email, &login.ip_address, &login.login_time, &login.device_name)
|
match mail::send_incomplete_2fa_login(
|
||||||
.await
|
&user.email,
|
||||||
.expect("Error sending incomplete 2FA email");
|
&login.ip_address,
|
||||||
login.delete(&mut conn).await.expect("Error deleting incomplete 2FA record");
|
&login.login_time,
|
||||||
|
&login.device_name,
|
||||||
|
&DeviceType::from_i32(login.device_type).to_string(),
|
||||||
|
)
|
||||||
|
.await
|
||||||
|
{
|
||||||
|
Ok(_) => {
|
||||||
|
if let Err(e) = login.delete(&mut conn).await {
|
||||||
|
error!("Error deleting incomplete 2FA record: {e:#?}");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
Err(e) => {
|
||||||
|
error!("Error sending incomplete 2FA email: {e:#?}");
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@@ -42,7 +42,7 @@ impl ProtectedActionData {
|
|||||||
}
|
}
|
||||||
|
|
||||||
pub fn from_json(string: &str) -> Result<Self, Error> {
|
pub fn from_json(string: &str) -> Result<Self, Error> {
|
||||||
let res: Result<Self, crate::serde_json::Error> = serde_json::from_str(string);
|
let res: Result<Self, serde_json::Error> = serde_json::from_str(string);
|
||||||
match res {
|
match res {
|
||||||
Ok(x) => Ok(x),
|
Ok(x) => Ok(x),
|
||||||
Err(_) => err!("Could not decode ProtectedActionData from string"),
|
Err(_) => err!("Could not decode ProtectedActionData from string"),
|
||||||
|
@@ -49,7 +49,7 @@ fn parse_yubikeys(data: &EnableYubikeyData) -> Vec<String> {
|
|||||||
data_keys.iter().filter_map(|e| e.as_ref().cloned()).collect()
|
data_keys.iter().filter_map(|e| e.as_ref().cloned()).collect()
|
||||||
}
|
}
|
||||||
|
|
||||||
fn jsonify_yubikeys(yubikeys: Vec<String>) -> serde_json::Value {
|
fn jsonify_yubikeys(yubikeys: Vec<String>) -> Value {
|
||||||
let mut result = Value::Object(serde_json::Map::new());
|
let mut result = Value::Object(serde_json::Map::new());
|
||||||
|
|
||||||
for (i, key) in yubikeys.into_iter().enumerate() {
|
for (i, key) in yubikeys.into_iter().enumerate() {
|
||||||
|
@@ -1,6 +1,7 @@
|
|||||||
use std::{
|
use std::{
|
||||||
|
collections::HashMap,
|
||||||
net::IpAddr,
|
net::IpAddr,
|
||||||
sync::{Arc, Mutex},
|
sync::Arc,
|
||||||
time::{Duration, SystemTime},
|
time::{Duration, SystemTime},
|
||||||
};
|
};
|
||||||
|
|
||||||
@@ -22,7 +23,8 @@ use html5gum::{Emitter, HtmlString, InfallibleTokenizer, Readable, StringReader,
|
|||||||
|
|
||||||
use crate::{
|
use crate::{
|
||||||
error::Error,
|
error::Error,
|
||||||
util::{get_reqwest_client_builder, Cached, CustomDnsResolver, CustomResolverError},
|
http_client::{get_reqwest_client_builder, should_block_address, CustomHttpClientError},
|
||||||
|
util::Cached,
|
||||||
CONFIG,
|
CONFIG,
|
||||||
};
|
};
|
||||||
|
|
||||||
@@ -53,7 +55,6 @@ static CLIENT: Lazy<Client> = Lazy::new(|| {
|
|||||||
.timeout(icon_download_timeout)
|
.timeout(icon_download_timeout)
|
||||||
.pool_max_idle_per_host(5) // Configure the Hyper Pool to only have max 5 idle connections
|
.pool_max_idle_per_host(5) // Configure the Hyper Pool to only have max 5 idle connections
|
||||||
.pool_idle_timeout(pool_idle_timeout) // Configure the Hyper Pool to timeout after 10 seconds
|
.pool_idle_timeout(pool_idle_timeout) // Configure the Hyper Pool to timeout after 10 seconds
|
||||||
.dns_resolver(CustomDnsResolver::instance())
|
|
||||||
.default_headers(default_headers.clone())
|
.default_headers(default_headers.clone())
|
||||||
.build()
|
.build()
|
||||||
.expect("Failed to build client")
|
.expect("Failed to build client")
|
||||||
@@ -69,7 +70,8 @@ fn icon_external(domain: &str) -> Option<Redirect> {
|
|||||||
return None;
|
return None;
|
||||||
}
|
}
|
||||||
|
|
||||||
if is_domain_blacklisted(domain) {
|
if should_block_address(domain) {
|
||||||
|
warn!("Blocked address: {}", domain);
|
||||||
return None;
|
return None;
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -99,6 +101,15 @@ async fn icon_internal(domain: &str) -> Cached<(ContentType, Vec<u8>)> {
|
|||||||
);
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if should_block_address(domain) {
|
||||||
|
warn!("Blocked address: {}", domain);
|
||||||
|
return Cached::ttl(
|
||||||
|
(ContentType::new("image", "png"), FALLBACK_ICON.to_vec()),
|
||||||
|
CONFIG.icon_cache_negttl(),
|
||||||
|
true,
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
match get_icon(domain).await {
|
match get_icon(domain).await {
|
||||||
Some((icon, icon_type)) => {
|
Some((icon, icon_type)) => {
|
||||||
Cached::ttl((ContentType::new("image", icon_type), icon), CONFIG.icon_cache_ttl(), true)
|
Cached::ttl((ContentType::new("image", icon_type), icon), CONFIG.icon_cache_ttl(), true)
|
||||||
@@ -144,30 +155,6 @@ fn is_valid_domain(domain: &str) -> bool {
|
|||||||
true
|
true
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn is_domain_blacklisted(domain: &str) -> bool {
|
|
||||||
let Some(config_blacklist) = CONFIG.icon_blacklist_regex() else {
|
|
||||||
return false;
|
|
||||||
};
|
|
||||||
|
|
||||||
// Compiled domain blacklist
|
|
||||||
static COMPILED_BLACKLIST: Mutex<Option<(String, Regex)>> = Mutex::new(None);
|
|
||||||
let mut guard = COMPILED_BLACKLIST.lock().unwrap();
|
|
||||||
|
|
||||||
// If the stored regex is up to date, use it
|
|
||||||
if let Some((value, regex)) = &*guard {
|
|
||||||
if value == &config_blacklist {
|
|
||||||
return regex.is_match(domain);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// If we don't have a regex stored, or it's not up to date, recreate it
|
|
||||||
let regex = Regex::new(&config_blacklist).unwrap();
|
|
||||||
let is_match = regex.is_match(domain);
|
|
||||||
*guard = Some((config_blacklist, regex));
|
|
||||||
|
|
||||||
is_match
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn get_icon(domain: &str) -> Option<(Vec<u8>, String)> {
|
async fn get_icon(domain: &str) -> Option<(Vec<u8>, String)> {
|
||||||
let path = format!("{}/{}.png", CONFIG.icon_cache_folder(), domain);
|
let path = format!("{}/{}.png", CONFIG.icon_cache_folder(), domain);
|
||||||
|
|
||||||
@@ -195,9 +182,9 @@ async fn get_icon(domain: &str) -> Option<(Vec<u8>, String)> {
|
|||||||
Some((icon.to_vec(), icon_type.unwrap_or("x-icon").to_string()))
|
Some((icon.to_vec(), icon_type.unwrap_or("x-icon").to_string()))
|
||||||
}
|
}
|
||||||
Err(e) => {
|
Err(e) => {
|
||||||
// If this error comes from the custom resolver, this means this is a blacklisted domain
|
// If this error comes from the custom resolver, this means this is a blocked domain
|
||||||
// or non global IP, don't save the miss file in this case to avoid leaking it
|
// or non global IP, don't save the miss file in this case to avoid leaking it
|
||||||
if let Some(error) = CustomResolverError::downcast_ref(&e) {
|
if let Some(error) = CustomHttpClientError::downcast_ref(&e) {
|
||||||
warn!("{error}");
|
warn!("{error}");
|
||||||
return None;
|
return None;
|
||||||
}
|
}
|
||||||
@@ -353,7 +340,7 @@ async fn get_icon_url(domain: &str) -> Result<IconUrlResult, Error> {
|
|||||||
|
|
||||||
// First check the domain as given during the request for HTTPS.
|
// First check the domain as given during the request for HTTPS.
|
||||||
let resp = match get_page(&ssldomain).await {
|
let resp = match get_page(&ssldomain).await {
|
||||||
Err(e) if CustomResolverError::downcast_ref(&e).is_none() => {
|
Err(e) if CustomHttpClientError::downcast_ref(&e).is_none() => {
|
||||||
// If we get an error that is not caused by the blacklist, we retry with HTTP
|
// If we get an error that is not caused by the blacklist, we retry with HTTP
|
||||||
match get_page(&httpdomain).await {
|
match get_page(&httpdomain).await {
|
||||||
mut sub_resp @ Err(_) => {
|
mut sub_resp @ Err(_) => {
|
||||||
@@ -460,6 +447,9 @@ async fn get_page_with_referer(url: &str, referer: &str) -> Result<Response, Err
|
|||||||
/// priority2 = get_icon_priority("https://example.com/path/to/a/favicon.ico", "");
|
/// priority2 = get_icon_priority("https://example.com/path/to/a/favicon.ico", "");
|
||||||
/// ```
|
/// ```
|
||||||
fn get_icon_priority(href: &str, sizes: &str) -> u8 {
|
fn get_icon_priority(href: &str, sizes: &str) -> u8 {
|
||||||
|
static PRIORITY_MAP: Lazy<HashMap<&'static str, u8>> =
|
||||||
|
Lazy::new(|| [(".png", 10), (".jpg", 20), (".jpeg", 20)].into_iter().collect());
|
||||||
|
|
||||||
// Check if there is a dimension set
|
// Check if there is a dimension set
|
||||||
let (width, height) = parse_sizes(sizes);
|
let (width, height) = parse_sizes(sizes);
|
||||||
|
|
||||||
@@ -484,13 +474,9 @@ fn get_icon_priority(href: &str, sizes: &str) -> u8 {
|
|||||||
200
|
200
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
// Change priority by file extension
|
match href.rsplit_once('.') {
|
||||||
if href.ends_with(".png") {
|
Some((_, extension)) => PRIORITY_MAP.get(&*extension.to_ascii_lowercase()).copied().unwrap_or(30),
|
||||||
10
|
None => 30,
|
||||||
} else if href.ends_with(".jpg") || href.ends_with(".jpeg") {
|
|
||||||
20
|
|
||||||
} else {
|
|
||||||
30
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -637,7 +623,7 @@ use cookie_store::CookieStore;
|
|||||||
pub struct Jar(std::sync::RwLock<CookieStore>);
|
pub struct Jar(std::sync::RwLock<CookieStore>);
|
||||||
|
|
||||||
impl reqwest::cookie::CookieStore for Jar {
|
impl reqwest::cookie::CookieStore for Jar {
|
||||||
fn set_cookies(&self, cookie_headers: &mut dyn Iterator<Item = &header::HeaderValue>, url: &url::Url) {
|
fn set_cookies(&self, cookie_headers: &mut dyn Iterator<Item = &HeaderValue>, url: &url::Url) {
|
||||||
use cookie::{Cookie as RawCookie, ParseError as RawCookieParseError};
|
use cookie::{Cookie as RawCookie, ParseError as RawCookieParseError};
|
||||||
use time::Duration;
|
use time::Duration;
|
||||||
|
|
||||||
@@ -656,7 +642,7 @@ impl reqwest::cookie::CookieStore for Jar {
|
|||||||
cookie_store.store_response_cookies(cookies, url);
|
cookie_store.store_response_cookies(cookies, url);
|
||||||
}
|
}
|
||||||
|
|
||||||
fn cookies(&self, url: &url::Url) -> Option<header::HeaderValue> {
|
fn cookies(&self, url: &url::Url) -> Option<HeaderValue> {
|
||||||
let cookie_store = self.0.read().unwrap();
|
let cookie_store = self.0.read().unwrap();
|
||||||
let s = cookie_store
|
let s = cookie_store
|
||||||
.get_request_values(url)
|
.get_request_values(url)
|
||||||
@@ -668,7 +654,7 @@ impl reqwest::cookie::CookieStore for Jar {
|
|||||||
return None;
|
return None;
|
||||||
}
|
}
|
||||||
|
|
||||||
header::HeaderValue::from_maybe_shared(Bytes::from(s)).ok()
|
HeaderValue::from_maybe_shared(Bytes::from(s)).ok()
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@@ -12,7 +12,7 @@ use crate::{
|
|||||||
core::{
|
core::{
|
||||||
accounts::{PreloginData, RegisterData, _prelogin, _register},
|
accounts::{PreloginData, RegisterData, _prelogin, _register},
|
||||||
log_user_event,
|
log_user_event,
|
||||||
two_factor::{authenticator, duo, email, enforce_2fa_policy, webauthn, yubikey},
|
two_factor::{authenticator, duo, duo_oidc, email, enforce_2fa_policy, webauthn, yubikey},
|
||||||
},
|
},
|
||||||
push::register_push_device,
|
push::register_push_device,
|
||||||
ApiResult, EmptyResult, JsonResult,
|
ApiResult, EmptyResult, JsonResult,
|
||||||
@@ -135,6 +135,18 @@ async fn _refresh_login(data: ConnectData, conn: &mut DbConn) -> JsonResult {
|
|||||||
Ok(Json(result))
|
Ok(Json(result))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[derive(Default, Deserialize, Serialize)]
|
||||||
|
#[serde(rename_all = "camelCase")]
|
||||||
|
struct MasterPasswordPolicy {
|
||||||
|
min_complexity: u8,
|
||||||
|
min_length: u32,
|
||||||
|
require_lower: bool,
|
||||||
|
require_upper: bool,
|
||||||
|
require_numbers: bool,
|
||||||
|
require_special: bool,
|
||||||
|
enforce_on_login: bool,
|
||||||
|
}
|
||||||
|
|
||||||
async fn _password_login(
|
async fn _password_login(
|
||||||
data: ConnectData,
|
data: ConnectData,
|
||||||
user_uuid: &mut Option<String>,
|
user_uuid: &mut Option<String>,
|
||||||
@@ -253,7 +265,7 @@ async fn _password_login(
|
|||||||
let twofactor_token = twofactor_auth(&user, &data, &mut device, ip, conn).await?;
|
let twofactor_token = twofactor_auth(&user, &data, &mut device, ip, conn).await?;
|
||||||
|
|
||||||
if CONFIG.mail_enabled() && new_device {
|
if CONFIG.mail_enabled() && new_device {
|
||||||
if let Err(e) = mail::send_new_device_logged_in(&user.email, &ip.ip.to_string(), &now, &device.name).await {
|
if let Err(e) = mail::send_new_device_logged_in(&user.email, &ip.ip.to_string(), &now, &device).await {
|
||||||
error!("Error sending new device email: {:#?}", e);
|
error!("Error sending new device email: {:#?}", e);
|
||||||
|
|
||||||
if CONFIG.require_device_email() {
|
if CONFIG.require_device_email() {
|
||||||
@@ -282,6 +294,36 @@ async fn _password_login(
|
|||||||
let (access_token, expires_in) = device.refresh_tokens(&user, scope_vec);
|
let (access_token, expires_in) = device.refresh_tokens(&user, scope_vec);
|
||||||
device.save(conn).await?;
|
device.save(conn).await?;
|
||||||
|
|
||||||
|
// Fetch all valid Master Password Policies and merge them into one with all true's and larges numbers as one policy
|
||||||
|
let master_password_policies: Vec<MasterPasswordPolicy> =
|
||||||
|
OrgPolicy::find_accepted_and_confirmed_by_user_and_active_policy(
|
||||||
|
&user.uuid,
|
||||||
|
OrgPolicyType::MasterPassword,
|
||||||
|
conn,
|
||||||
|
)
|
||||||
|
.await
|
||||||
|
.into_iter()
|
||||||
|
.filter_map(|p| serde_json::from_str(&p.data).ok())
|
||||||
|
.collect();
|
||||||
|
|
||||||
|
let master_password_policy = if !master_password_policies.is_empty() {
|
||||||
|
let mut mpp_json = json!(master_password_policies.into_iter().reduce(|acc, policy| {
|
||||||
|
MasterPasswordPolicy {
|
||||||
|
min_complexity: acc.min_complexity.max(policy.min_complexity),
|
||||||
|
min_length: acc.min_length.max(policy.min_length),
|
||||||
|
require_lower: acc.require_lower || policy.require_lower,
|
||||||
|
require_upper: acc.require_upper || policy.require_upper,
|
||||||
|
require_numbers: acc.require_numbers || policy.require_numbers,
|
||||||
|
require_special: acc.require_special || policy.require_special,
|
||||||
|
enforce_on_login: acc.enforce_on_login || policy.enforce_on_login,
|
||||||
|
}
|
||||||
|
}));
|
||||||
|
mpp_json["object"] = json!("masterPasswordPolicy");
|
||||||
|
mpp_json
|
||||||
|
} else {
|
||||||
|
json!({"object": "masterPasswordPolicy"})
|
||||||
|
};
|
||||||
|
|
||||||
let mut result = json!({
|
let mut result = json!({
|
||||||
"access_token": access_token,
|
"access_token": access_token,
|
||||||
"expires_in": expires_in,
|
"expires_in": expires_in,
|
||||||
@@ -297,9 +339,7 @@ async fn _password_login(
|
|||||||
"KdfParallelism": user.client_kdf_parallelism,
|
"KdfParallelism": user.client_kdf_parallelism,
|
||||||
"ResetMasterPassword": false, // TODO: Same as above
|
"ResetMasterPassword": false, // TODO: Same as above
|
||||||
"ForcePasswordReset": false,
|
"ForcePasswordReset": false,
|
||||||
"MasterPasswordPolicy": {
|
"MasterPasswordPolicy": master_password_policy,
|
||||||
"object": "masterPasswordPolicy",
|
|
||||||
},
|
|
||||||
|
|
||||||
"scope": scope,
|
"scope": scope,
|
||||||
"unofficialServer": true,
|
"unofficialServer": true,
|
||||||
@@ -381,7 +421,7 @@ async fn _user_api_key_login(
|
|||||||
|
|
||||||
if CONFIG.mail_enabled() && new_device {
|
if CONFIG.mail_enabled() && new_device {
|
||||||
let now = Utc::now().naive_utc();
|
let now = Utc::now().naive_utc();
|
||||||
if let Err(e) = mail::send_new_device_logged_in(&user.email, &ip.ip.to_string(), &now, &device.name).await {
|
if let Err(e) = mail::send_new_device_logged_in(&user.email, &ip.ip.to_string(), &now, &device).await {
|
||||||
error!("Error sending new device email: {:#?}", e);
|
error!("Error sending new device email: {:#?}", e);
|
||||||
|
|
||||||
if CONFIG.require_device_email() {
|
if CONFIG.require_device_email() {
|
||||||
@@ -495,14 +535,16 @@ async fn twofactor_auth(
|
|||||||
return Ok(None);
|
return Ok(None);
|
||||||
}
|
}
|
||||||
|
|
||||||
TwoFactorIncomplete::mark_incomplete(&user.uuid, &device.uuid, &device.name, ip, conn).await?;
|
TwoFactorIncomplete::mark_incomplete(&user.uuid, &device.uuid, &device.name, device.atype, ip, conn).await?;
|
||||||
|
|
||||||
let twofactor_ids: Vec<_> = twofactors.iter().map(|tf| tf.atype).collect();
|
let twofactor_ids: Vec<_> = twofactors.iter().map(|tf| tf.atype).collect();
|
||||||
let selected_id = data.two_factor_provider.unwrap_or(twofactor_ids[0]); // If we aren't given a two factor provider, assume the first one
|
let selected_id = data.two_factor_provider.unwrap_or(twofactor_ids[0]); // If we aren't given a two factor provider, assume the first one
|
||||||
|
|
||||||
let twofactor_code = match data.two_factor_token {
|
let twofactor_code = match data.two_factor_token {
|
||||||
Some(ref code) => code,
|
Some(ref code) => code,
|
||||||
None => err_json!(_json_err_twofactor(&twofactor_ids, &user.uuid, conn).await?, "2FA token not provided"),
|
None => {
|
||||||
|
err_json!(_json_err_twofactor(&twofactor_ids, &user.uuid, data, conn).await?, "2FA token not provided")
|
||||||
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
let selected_twofactor = twofactors.into_iter().find(|tf| tf.atype == selected_id && tf.enabled);
|
let selected_twofactor = twofactors.into_iter().find(|tf| tf.atype == selected_id && tf.enabled);
|
||||||
@@ -519,7 +561,23 @@ async fn twofactor_auth(
|
|||||||
Some(TwoFactorType::Webauthn) => webauthn::validate_webauthn_login(&user.uuid, twofactor_code, conn).await?,
|
Some(TwoFactorType::Webauthn) => webauthn::validate_webauthn_login(&user.uuid, twofactor_code, conn).await?,
|
||||||
Some(TwoFactorType::YubiKey) => yubikey::validate_yubikey_login(twofactor_code, &selected_data?).await?,
|
Some(TwoFactorType::YubiKey) => yubikey::validate_yubikey_login(twofactor_code, &selected_data?).await?,
|
||||||
Some(TwoFactorType::Duo) => {
|
Some(TwoFactorType::Duo) => {
|
||||||
duo::validate_duo_login(data.username.as_ref().unwrap().trim(), twofactor_code, conn).await?
|
match CONFIG.duo_use_iframe() {
|
||||||
|
true => {
|
||||||
|
// Legacy iframe prompt flow
|
||||||
|
duo::validate_duo_login(&user.email, twofactor_code, conn).await?
|
||||||
|
}
|
||||||
|
false => {
|
||||||
|
// OIDC based flow
|
||||||
|
duo_oidc::validate_duo_login(
|
||||||
|
&user.email,
|
||||||
|
twofactor_code,
|
||||||
|
data.client_id.as_ref().unwrap(),
|
||||||
|
data.device_identifier.as_ref().unwrap(),
|
||||||
|
conn,
|
||||||
|
)
|
||||||
|
.await?
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
Some(TwoFactorType::Email) => {
|
Some(TwoFactorType::Email) => {
|
||||||
email::validate_email_code_str(&user.uuid, twofactor_code, &selected_data?, conn).await?
|
email::validate_email_code_str(&user.uuid, twofactor_code, &selected_data?, conn).await?
|
||||||
@@ -532,7 +590,7 @@ async fn twofactor_auth(
|
|||||||
}
|
}
|
||||||
_ => {
|
_ => {
|
||||||
err_json!(
|
err_json!(
|
||||||
_json_err_twofactor(&twofactor_ids, &user.uuid, conn).await?,
|
_json_err_twofactor(&twofactor_ids, &user.uuid, data, conn).await?,
|
||||||
"2FA Remember token not provided"
|
"2FA Remember token not provided"
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
@@ -560,7 +618,12 @@ fn _selected_data(tf: Option<TwoFactor>) -> ApiResult<String> {
|
|||||||
tf.map(|t| t.data).map_res("Two factor doesn't exist")
|
tf.map(|t| t.data).map_res("Two factor doesn't exist")
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn _json_err_twofactor(providers: &[i32], user_uuid: &str, conn: &mut DbConn) -> ApiResult<Value> {
|
async fn _json_err_twofactor(
|
||||||
|
providers: &[i32],
|
||||||
|
user_uuid: &str,
|
||||||
|
data: &ConnectData,
|
||||||
|
conn: &mut DbConn,
|
||||||
|
) -> ApiResult<Value> {
|
||||||
let mut result = json!({
|
let mut result = json!({
|
||||||
"error" : "invalid_grant",
|
"error" : "invalid_grant",
|
||||||
"error_description" : "Two factor required.",
|
"error_description" : "Two factor required.",
|
||||||
@@ -588,12 +651,30 @@ async fn _json_err_twofactor(providers: &[i32], user_uuid: &str, conn: &mut DbCo
|
|||||||
None => err!("User does not exist"),
|
None => err!("User does not exist"),
|
||||||
};
|
};
|
||||||
|
|
||||||
let (signature, host) = duo::generate_duo_signature(&email, conn).await?;
|
match CONFIG.duo_use_iframe() {
|
||||||
|
true => {
|
||||||
|
// Legacy iframe prompt flow
|
||||||
|
let (signature, host) = duo::generate_duo_signature(&email, conn).await?;
|
||||||
|
result["TwoFactorProviders2"][provider.to_string()] = json!({
|
||||||
|
"Host": host,
|
||||||
|
"Signature": signature,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
false => {
|
||||||
|
// OIDC based flow
|
||||||
|
let auth_url = duo_oidc::get_duo_auth_url(
|
||||||
|
&email,
|
||||||
|
data.client_id.as_ref().unwrap(),
|
||||||
|
data.device_identifier.as_ref().unwrap(),
|
||||||
|
conn,
|
||||||
|
)
|
||||||
|
.await?;
|
||||||
|
|
||||||
result["TwoFactorProviders2"][provider.to_string()] = json!({
|
result["TwoFactorProviders2"][provider.to_string()] = json!({
|
||||||
"Host": host,
|
"AuthUrl": auth_url,
|
||||||
"Signature": signature,
|
})
|
||||||
});
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
Some(tf_type @ TwoFactorType::YubiKey) => {
|
Some(tf_type @ TwoFactorType::YubiKey) => {
|
||||||
|
@@ -20,7 +20,7 @@ pub use crate::api::{
|
|||||||
core::two_factor::send_incomplete_2fa_notifications,
|
core::two_factor::send_incomplete_2fa_notifications,
|
||||||
core::{emergency_notification_reminder_job, emergency_request_timeout_job},
|
core::{emergency_notification_reminder_job, emergency_request_timeout_job},
|
||||||
core::{event_cleanup_job, events_routes as core_events_routes},
|
core::{event_cleanup_job, events_routes as core_events_routes},
|
||||||
icons::{is_domain_blacklisted, routes as icons_routes},
|
icons::routes as icons_routes,
|
||||||
identity::routes as identity_routes,
|
identity::routes as identity_routes,
|
||||||
notifications::routes as notifications_routes,
|
notifications::routes as notifications_routes,
|
||||||
notifications::{AnonymousNotify, Notify, UpdateType, WS_ANONYMOUS_SUBSCRIPTIONS, WS_USERS},
|
notifications::{AnonymousNotify, Notify, UpdateType, WS_ANONYMOUS_SUBSCRIPTIONS, WS_USERS},
|
||||||
|
@@ -428,7 +428,7 @@ impl WebSocketUsers {
|
|||||||
let (user_uuid, collection_uuids, revision_date) = if let Some(collection_uuids) = collection_uuids {
|
let (user_uuid, collection_uuids, revision_date) = if let Some(collection_uuids) = collection_uuids {
|
||||||
(
|
(
|
||||||
Value::Nil,
|
Value::Nil,
|
||||||
Value::Array(collection_uuids.into_iter().map(|v| v.into()).collect::<Vec<rmpv::Value>>()),
|
Value::Array(collection_uuids.into_iter().map(|v| v.into()).collect::<Vec<Value>>()),
|
||||||
serialize_date(Utc::now().naive_utc()),
|
serialize_date(Utc::now().naive_utc()),
|
||||||
)
|
)
|
||||||
} else {
|
} else {
|
||||||
|
@@ -1,11 +1,14 @@
|
|||||||
use reqwest::header::{ACCEPT, AUTHORIZATION, CONTENT_TYPE};
|
use reqwest::{
|
||||||
|
header::{ACCEPT, AUTHORIZATION, CONTENT_TYPE},
|
||||||
|
Method,
|
||||||
|
};
|
||||||
use serde_json::Value;
|
use serde_json::Value;
|
||||||
use tokio::sync::RwLock;
|
use tokio::sync::RwLock;
|
||||||
|
|
||||||
use crate::{
|
use crate::{
|
||||||
api::{ApiResult, EmptyResult, UpdateType},
|
api::{ApiResult, EmptyResult, UpdateType},
|
||||||
db::models::{Cipher, Device, Folder, Send, User},
|
db::models::{Cipher, Device, Folder, Send, User},
|
||||||
util::get_reqwest_client,
|
http_client::make_http_request,
|
||||||
CONFIG,
|
CONFIG,
|
||||||
};
|
};
|
||||||
|
|
||||||
@@ -50,8 +53,7 @@ async fn get_auth_push_token() -> ApiResult<String> {
|
|||||||
("client_secret", &client_secret),
|
("client_secret", &client_secret),
|
||||||
];
|
];
|
||||||
|
|
||||||
let res = match get_reqwest_client()
|
let res = match make_http_request(Method::POST, &format!("{}/connect/token", CONFIG.push_identity_uri()))?
|
||||||
.post(&format!("{}/connect/token", CONFIG.push_identity_uri()))
|
|
||||||
.form(¶ms)
|
.form(¶ms)
|
||||||
.send()
|
.send()
|
||||||
.await
|
.await
|
||||||
@@ -104,8 +106,7 @@ pub async fn register_push_device(device: &mut Device, conn: &mut crate::db::DbC
|
|||||||
let auth_push_token = get_auth_push_token().await?;
|
let auth_push_token = get_auth_push_token().await?;
|
||||||
let auth_header = format!("Bearer {}", &auth_push_token);
|
let auth_header = format!("Bearer {}", &auth_push_token);
|
||||||
|
|
||||||
if let Err(e) = get_reqwest_client()
|
if let Err(e) = make_http_request(Method::POST, &(CONFIG.push_relay_uri() + "/push/register"))?
|
||||||
.post(CONFIG.push_relay_uri() + "/push/register")
|
|
||||||
.header(CONTENT_TYPE, "application/json")
|
.header(CONTENT_TYPE, "application/json")
|
||||||
.header(ACCEPT, "application/json")
|
.header(ACCEPT, "application/json")
|
||||||
.header(AUTHORIZATION, auth_header)
|
.header(AUTHORIZATION, auth_header)
|
||||||
@@ -132,8 +133,7 @@ pub async fn unregister_push_device(push_uuid: Option<String>) -> EmptyResult {
|
|||||||
|
|
||||||
let auth_header = format!("Bearer {}", &auth_push_token);
|
let auth_header = format!("Bearer {}", &auth_push_token);
|
||||||
|
|
||||||
match get_reqwest_client()
|
match make_http_request(Method::DELETE, &(CONFIG.push_relay_uri() + "/push/" + &push_uuid.unwrap()))?
|
||||||
.delete(CONFIG.push_relay_uri() + "/push/" + &push_uuid.unwrap())
|
|
||||||
.header(AUTHORIZATION, auth_header)
|
.header(AUTHORIZATION, auth_header)
|
||||||
.send()
|
.send()
|
||||||
.await
|
.await
|
||||||
@@ -266,8 +266,15 @@ async fn send_to_push_relay(notification_data: Value) {
|
|||||||
|
|
||||||
let auth_header = format!("Bearer {}", &auth_push_token);
|
let auth_header = format!("Bearer {}", &auth_push_token);
|
||||||
|
|
||||||
if let Err(e) = get_reqwest_client()
|
let req = match make_http_request(Method::POST, &(CONFIG.push_relay_uri() + "/push/send")) {
|
||||||
.post(CONFIG.push_relay_uri() + "/push/send")
|
Ok(r) => r,
|
||||||
|
Err(e) => {
|
||||||
|
error!("An error occurred while sending a send update to the push relay: {}", e);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
if let Err(e) = req
|
||||||
.header(ACCEPT, "application/json")
|
.header(ACCEPT, "application/json")
|
||||||
.header(CONTENT_TYPE, "application/json")
|
.header(CONTENT_TYPE, "application/json")
|
||||||
.header(AUTHORIZATION, &auth_header)
|
.header(AUTHORIZATION, &auth_header)
|
||||||
|
76
src/auth.rs
76
src/auth.rs
@@ -1,13 +1,18 @@
|
|||||||
// JWT Handling
|
// JWT Handling
|
||||||
//
|
//
|
||||||
use chrono::{TimeDelta, Utc};
|
use chrono::{TimeDelta, Utc};
|
||||||
|
use jsonwebtoken::{errors::ErrorKind, Algorithm, DecodingKey, EncodingKey, Header};
|
||||||
use num_traits::FromPrimitive;
|
use num_traits::FromPrimitive;
|
||||||
use once_cell::sync::{Lazy, OnceCell};
|
use once_cell::sync::{Lazy, OnceCell};
|
||||||
|
|
||||||
use jsonwebtoken::{errors::ErrorKind, Algorithm, DecodingKey, EncodingKey, Header};
|
|
||||||
use openssl::rsa::Rsa;
|
use openssl::rsa::Rsa;
|
||||||
use serde::de::DeserializeOwned;
|
use serde::de::DeserializeOwned;
|
||||||
use serde::ser::Serialize;
|
use serde::ser::Serialize;
|
||||||
|
use std::{
|
||||||
|
env,
|
||||||
|
fs::File,
|
||||||
|
io::{Read, Write},
|
||||||
|
net::IpAddr,
|
||||||
|
};
|
||||||
|
|
||||||
use crate::{error::Error, CONFIG};
|
use crate::{error::Error, CONFIG};
|
||||||
|
|
||||||
@@ -30,28 +35,37 @@ static JWT_FILE_DOWNLOAD_ISSUER: Lazy<String> = Lazy::new(|| format!("{}|file_do
|
|||||||
static PRIVATE_RSA_KEY: OnceCell<EncodingKey> = OnceCell::new();
|
static PRIVATE_RSA_KEY: OnceCell<EncodingKey> = OnceCell::new();
|
||||||
static PUBLIC_RSA_KEY: OnceCell<DecodingKey> = OnceCell::new();
|
static PUBLIC_RSA_KEY: OnceCell<DecodingKey> = OnceCell::new();
|
||||||
|
|
||||||
pub fn initialize_keys() -> Result<(), crate::error::Error> {
|
pub fn initialize_keys() -> Result<(), Error> {
|
||||||
let mut priv_key_buffer = Vec::with_capacity(2048);
|
fn read_key(create_if_missing: bool) -> Result<(Rsa<openssl::pkey::Private>, Vec<u8>), Error> {
|
||||||
|
let mut priv_key_buffer = Vec::with_capacity(2048);
|
||||||
|
|
||||||
let priv_key = {
|
let mut priv_key_file = File::options()
|
||||||
let mut priv_key_file =
|
.create(create_if_missing)
|
||||||
File::options().create(true).truncate(false).read(true).write(true).open(CONFIG.private_rsa_key())?;
|
.truncate(false)
|
||||||
|
.read(true)
|
||||||
|
.write(create_if_missing)
|
||||||
|
.open(CONFIG.private_rsa_key())?;
|
||||||
|
|
||||||
#[allow(clippy::verbose_file_reads)]
|
#[allow(clippy::verbose_file_reads)]
|
||||||
let bytes_read = priv_key_file.read_to_end(&mut priv_key_buffer)?;
|
let bytes_read = priv_key_file.read_to_end(&mut priv_key_buffer)?;
|
||||||
|
|
||||||
if bytes_read > 0 {
|
let rsa_key = if bytes_read > 0 {
|
||||||
Rsa::private_key_from_pem(&priv_key_buffer[..bytes_read])?
|
Rsa::private_key_from_pem(&priv_key_buffer[..bytes_read])?
|
||||||
} else {
|
} else if create_if_missing {
|
||||||
// Only create the key if the file doesn't exist or is empty
|
// Only create the key if the file doesn't exist or is empty
|
||||||
let rsa_key = openssl::rsa::Rsa::generate(2048)?;
|
let rsa_key = Rsa::generate(2048)?;
|
||||||
priv_key_buffer = rsa_key.private_key_to_pem()?;
|
priv_key_buffer = rsa_key.private_key_to_pem()?;
|
||||||
priv_key_file.write_all(&priv_key_buffer)?;
|
priv_key_file.write_all(&priv_key_buffer)?;
|
||||||
info!("Private key created correctly.");
|
info!("Private key '{}' created correctly", CONFIG.private_rsa_key());
|
||||||
rsa_key
|
rsa_key
|
||||||
}
|
} else {
|
||||||
};
|
err!("Private key does not exist or invalid format", CONFIG.private_rsa_key());
|
||||||
|
};
|
||||||
|
|
||||||
|
Ok((rsa_key, priv_key_buffer))
|
||||||
|
}
|
||||||
|
|
||||||
|
let (priv_key, priv_key_buffer) = read_key(true).or_else(|_| read_key(false))?;
|
||||||
let pub_key_buffer = priv_key.public_key_to_pem()?;
|
let pub_key_buffer = priv_key.public_key_to_pem()?;
|
||||||
|
|
||||||
let enc = EncodingKey::from_rsa_pem(&priv_key_buffer)?;
|
let enc = EncodingKey::from_rsa_pem(&priv_key_buffer)?;
|
||||||
@@ -379,8 +393,6 @@ impl<'r> FromRequest<'r> for Host {
|
|||||||
referer.to_string()
|
referer.to_string()
|
||||||
} else {
|
} else {
|
||||||
// Try to guess from the headers
|
// Try to guess from the headers
|
||||||
use std::env;
|
|
||||||
|
|
||||||
let protocol = if let Some(proto) = headers.get_one("X-Forwarded-Proto") {
|
let protocol = if let Some(proto) = headers.get_one("X-Forwarded-Proto") {
|
||||||
proto
|
proto
|
||||||
} else if env::var("ROCKET_TLS").is_ok() {
|
} else if env::var("ROCKET_TLS").is_ok() {
|
||||||
@@ -805,11 +817,6 @@ impl<'r> FromRequest<'r> for OwnerHeaders {
|
|||||||
//
|
//
|
||||||
// Client IP address detection
|
// Client IP address detection
|
||||||
//
|
//
|
||||||
use std::{
|
|
||||||
fs::File,
|
|
||||||
io::{Read, Write},
|
|
||||||
net::IpAddr,
|
|
||||||
};
|
|
||||||
|
|
||||||
pub struct ClientIp {
|
pub struct ClientIp {
|
||||||
pub ip: IpAddr,
|
pub ip: IpAddr,
|
||||||
@@ -842,6 +849,35 @@ impl<'r> FromRequest<'r> for ClientIp {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
pub struct Secure {
|
||||||
|
pub https: bool,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[rocket::async_trait]
|
||||||
|
impl<'r> FromRequest<'r> for Secure {
|
||||||
|
type Error = ();
|
||||||
|
|
||||||
|
async fn from_request(request: &'r Request<'_>) -> Outcome<Self, Self::Error> {
|
||||||
|
let headers = request.headers();
|
||||||
|
|
||||||
|
// Try to guess from the headers
|
||||||
|
let protocol = match headers.get_one("X-Forwarded-Proto") {
|
||||||
|
Some(proto) => proto,
|
||||||
|
None => {
|
||||||
|
if env::var("ROCKET_TLS").is_ok() {
|
||||||
|
"https"
|
||||||
|
} else {
|
||||||
|
"http"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
Outcome::Success(Secure {
|
||||||
|
https: protocol == "https",
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
pub struct WsAccessTokenHeader {
|
pub struct WsAccessTokenHeader {
|
||||||
pub access_token: Option<String>,
|
pub access_token: Option<String>,
|
||||||
}
|
}
|
||||||
|
119
src/config.rs
119
src/config.rs
@@ -146,6 +146,12 @@ macro_rules! make_config {
|
|||||||
config.signups_domains_whitelist = config.signups_domains_whitelist.trim().to_lowercase();
|
config.signups_domains_whitelist = config.signups_domains_whitelist.trim().to_lowercase();
|
||||||
config.org_creation_users = config.org_creation_users.trim().to_lowercase();
|
config.org_creation_users = config.org_creation_users.trim().to_lowercase();
|
||||||
|
|
||||||
|
|
||||||
|
// Copy the values from the deprecated flags to the new ones
|
||||||
|
if config.http_request_block_regex.is_none() {
|
||||||
|
config.http_request_block_regex = config.icon_blacklist_regex.clone();
|
||||||
|
}
|
||||||
|
|
||||||
config
|
config
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -325,7 +331,7 @@ macro_rules! make_config {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
}};
|
}};
|
||||||
( @build $value:expr, $config:expr, gen, $default_fn:expr ) => {{
|
( @build $value:expr, $config:expr, generated, $default_fn:expr ) => {{
|
||||||
let f: &dyn Fn(&ConfigItems) -> _ = &$default_fn;
|
let f: &dyn Fn(&ConfigItems) -> _ = &$default_fn;
|
||||||
f($config)
|
f($config)
|
||||||
}};
|
}};
|
||||||
@@ -343,10 +349,10 @@ macro_rules! make_config {
|
|||||||
// }
|
// }
|
||||||
//
|
//
|
||||||
// Where action applied when the value wasn't provided and can be:
|
// Where action applied when the value wasn't provided and can be:
|
||||||
// def: Use a default value
|
// def: Use a default value
|
||||||
// auto: Value is auto generated based on other values
|
// auto: Value is auto generated based on other values
|
||||||
// option: Value is optional
|
// option: Value is optional
|
||||||
// gen: Value is always autogenerated and it's original value ignored
|
// generated: Value is always autogenerated and it's original value ignored
|
||||||
make_config! {
|
make_config! {
|
||||||
folders {
|
folders {
|
||||||
/// Data folder |> Main data folder
|
/// Data folder |> Main data folder
|
||||||
@@ -409,7 +415,9 @@ make_config! {
|
|||||||
/// Auth Request cleanup schedule |> Cron schedule of the job that cleans old auth requests from the auth request.
|
/// Auth Request cleanup schedule |> Cron schedule of the job that cleans old auth requests from the auth request.
|
||||||
/// Defaults to every minute. Set blank to disable this job.
|
/// Defaults to every minute. Set blank to disable this job.
|
||||||
auth_request_purge_schedule: String, false, def, "30 * * * * *".to_string();
|
auth_request_purge_schedule: String, false, def, "30 * * * * *".to_string();
|
||||||
|
/// Duo Auth context cleanup schedule |> Cron schedule of the job that cleans expired Duo contexts from the database. Does nothing if Duo MFA is disabled or set to use the legacy iframe prompt.
|
||||||
|
/// Defaults to once every minute. Set blank to disable this job.
|
||||||
|
duo_context_purge_schedule: String, false, def, "30 * * * * *".to_string();
|
||||||
},
|
},
|
||||||
|
|
||||||
/// General settings
|
/// General settings
|
||||||
@@ -507,7 +515,7 @@ make_config! {
|
|||||||
/// Set to the string "none" (without quotes), to disable any headers and just use the remote IP
|
/// Set to the string "none" (without quotes), to disable any headers and just use the remote IP
|
||||||
ip_header: String, true, def, "X-Real-IP".to_string();
|
ip_header: String, true, def, "X-Real-IP".to_string();
|
||||||
/// Internal IP header property, used to avoid recomputing each time
|
/// Internal IP header property, used to avoid recomputing each time
|
||||||
_ip_header_enabled: bool, false, gen, |c| &c.ip_header.trim().to_lowercase() != "none";
|
_ip_header_enabled: bool, false, generated, |c| &c.ip_header.trim().to_lowercase() != "none";
|
||||||
/// Icon service |> The predefined icon services are: internal, bitwarden, duckduckgo, google.
|
/// Icon service |> The predefined icon services are: internal, bitwarden, duckduckgo, google.
|
||||||
/// To specify a custom icon service, set a URL template with exactly one instance of `{}`,
|
/// To specify a custom icon service, set a URL template with exactly one instance of `{}`,
|
||||||
/// which is replaced with the domain. For example: `https://icon.example.com/domain/{}`.
|
/// which is replaced with the domain. For example: `https://icon.example.com/domain/{}`.
|
||||||
@@ -516,9 +524,9 @@ make_config! {
|
|||||||
/// corresponding icon at the external service.
|
/// corresponding icon at the external service.
|
||||||
icon_service: String, false, def, "internal".to_string();
|
icon_service: String, false, def, "internal".to_string();
|
||||||
/// _icon_service_url
|
/// _icon_service_url
|
||||||
_icon_service_url: String, false, gen, |c| generate_icon_service_url(&c.icon_service);
|
_icon_service_url: String, false, generated, |c| generate_icon_service_url(&c.icon_service);
|
||||||
/// _icon_service_csp
|
/// _icon_service_csp
|
||||||
_icon_service_csp: String, false, gen, |c| generate_icon_service_csp(&c.icon_service, &c._icon_service_url);
|
_icon_service_csp: String, false, generated, |c| generate_icon_service_csp(&c.icon_service, &c._icon_service_url);
|
||||||
/// Icon redirect code |> The HTTP status code to use for redirects to an external icon service.
|
/// Icon redirect code |> The HTTP status code to use for redirects to an external icon service.
|
||||||
/// The supported codes are 301 (legacy permanent), 302 (legacy temporary), 307 (temporary), and 308 (permanent).
|
/// The supported codes are 301 (legacy permanent), 302 (legacy temporary), 307 (temporary), and 308 (permanent).
|
||||||
/// Temporary redirects are useful while testing different icon services, but once a service
|
/// Temporary redirects are useful while testing different icon services, but once a service
|
||||||
@@ -531,12 +539,18 @@ make_config! {
|
|||||||
icon_cache_negttl: u64, true, def, 259_200;
|
icon_cache_negttl: u64, true, def, 259_200;
|
||||||
/// Icon download timeout |> Number of seconds when to stop attempting to download an icon.
|
/// Icon download timeout |> Number of seconds when to stop attempting to download an icon.
|
||||||
icon_download_timeout: u64, true, def, 10;
|
icon_download_timeout: u64, true, def, 10;
|
||||||
/// Icon blacklist Regex |> Any domains or IPs that match this regex won't be fetched by the icon service.
|
|
||||||
|
/// [Deprecated] Icon blacklist Regex |> Use `http_request_block_regex` instead
|
||||||
|
icon_blacklist_regex: String, false, option;
|
||||||
|
/// [Deprecated] Icon blacklist non global IPs |> Use `http_request_block_non_global_ips` instead
|
||||||
|
icon_blacklist_non_global_ips: bool, false, def, true;
|
||||||
|
|
||||||
|
/// Block HTTP domains/IPs by Regex |> Any domains or IPs that match this regex won't be fetched by the internal HTTP client.
|
||||||
/// Useful to hide other servers in the local network. Check the WIKI for more details
|
/// Useful to hide other servers in the local network. Check the WIKI for more details
|
||||||
icon_blacklist_regex: String, true, option;
|
http_request_block_regex: String, true, option;
|
||||||
/// Icon blacklist non global IPs |> Any IP which is not defined as a global IP will be blacklisted.
|
/// Block non global IPs |> Enabling this will cause the internal HTTP client to refuse to connect to any non global IP address.
|
||||||
/// Useful to secure your internal environment: See https://en.wikipedia.org/wiki/Reserved_IP_addresses for a list of IPs which it will block
|
/// Useful to secure your internal environment: See https://en.wikipedia.org/wiki/Reserved_IP_addresses for a list of IPs which it will block
|
||||||
icon_blacklist_non_global_ips: bool, true, def, true;
|
http_request_block_non_global_ips: bool, true, auto, |c| c.icon_blacklist_non_global_ips;
|
||||||
|
|
||||||
/// Disable Two-Factor remember |> Enabling this would force the users to use a second factor to login every time.
|
/// Disable Two-Factor remember |> Enabling this would force the users to use a second factor to login every time.
|
||||||
/// Note that the checkbox would still be present, but ignored.
|
/// Note that the checkbox would still be present, but ignored.
|
||||||
@@ -564,8 +578,9 @@ make_config! {
|
|||||||
use_syslog: bool, false, def, false;
|
use_syslog: bool, false, def, false;
|
||||||
/// Log file path
|
/// Log file path
|
||||||
log_file: String, false, option;
|
log_file: String, false, option;
|
||||||
/// Log level
|
/// Log level |> Valid values are "trace", "debug", "info", "warn", "error" and "off"
|
||||||
log_level: String, false, def, "Info".to_string();
|
/// For a specific module append it as a comma separated value "info,path::to::module=debug"
|
||||||
|
log_level: String, false, def, "info".to_string();
|
||||||
|
|
||||||
/// Enable DB WAL |> Turning this off might lead to worse performance, but might help if using vaultwarden on some exotic filesystems,
|
/// Enable DB WAL |> Turning this off might lead to worse performance, but might help if using vaultwarden on some exotic filesystems,
|
||||||
/// that do not support WAL. Please make sure you read project wiki on the topic before changing this setting.
|
/// that do not support WAL. Please make sure you read project wiki on the topic before changing this setting.
|
||||||
@@ -603,7 +618,18 @@ make_config! {
|
|||||||
admin_session_lifetime: i64, true, def, 20;
|
admin_session_lifetime: i64, true, def, 20;
|
||||||
|
|
||||||
/// Enable groups (BETA!) (Know the risks!) |> Enables groups support for organizations (Currently contains known issues!).
|
/// Enable groups (BETA!) (Know the risks!) |> Enables groups support for organizations (Currently contains known issues!).
|
||||||
org_groups_enabled: bool, false, def, false;
|
org_groups_enabled: bool, false, def, false;
|
||||||
|
|
||||||
|
/// Increase note size limit (Know the risks!) |> Sets the secure note size limit to 100_000 instead of the default 10_000.
|
||||||
|
/// WARNING: This could cause issues with clients. Also exports will not work on Bitwarden servers!
|
||||||
|
increase_note_size_limit: bool, true, def, false;
|
||||||
|
/// Generated max_note_size value to prevent if..else matching during every check
|
||||||
|
_max_note_size: usize, false, generated, |c| if c.increase_note_size_limit {100_000} else {10_000};
|
||||||
|
|
||||||
|
/// Enforce Single Org with Reset Password Policy |> Enforce that the Single Org policy is enabled before setting the Reset Password policy
|
||||||
|
/// Bitwarden enforces this by default. In Vaultwarden we encouraged to use multiple organizations because groups were not available.
|
||||||
|
/// Setting this to true will enforce the Single Org Policy to be enabled before you can enable the Reset Password policy.
|
||||||
|
enforce_single_org_with_reset_pw_policy: bool, false, def, false;
|
||||||
},
|
},
|
||||||
|
|
||||||
/// Yubikey settings
|
/// Yubikey settings
|
||||||
@@ -622,6 +648,8 @@ make_config! {
|
|||||||
duo: _enable_duo {
|
duo: _enable_duo {
|
||||||
/// Enabled
|
/// Enabled
|
||||||
_enable_duo: bool, true, def, true;
|
_enable_duo: bool, true, def, true;
|
||||||
|
/// Attempt to use deprecated iframe-based Traditional Prompt (Duo WebSDK 2)
|
||||||
|
duo_use_iframe: bool, false, def, false;
|
||||||
/// Integration Key
|
/// Integration Key
|
||||||
duo_ikey: String, true, option;
|
duo_ikey: String, true, option;
|
||||||
/// Secret Key
|
/// Secret Key
|
||||||
@@ -667,7 +695,7 @@ make_config! {
|
|||||||
/// Embed images as email attachments.
|
/// Embed images as email attachments.
|
||||||
smtp_embed_images: bool, true, def, true;
|
smtp_embed_images: bool, true, def, true;
|
||||||
/// _smtp_img_src
|
/// _smtp_img_src
|
||||||
_smtp_img_src: String, false, gen, |c| generate_smtp_img_src(c.smtp_embed_images, &c.domain);
|
_smtp_img_src: String, false, generated, |c| generate_smtp_img_src(c.smtp_embed_images, &c.domain);
|
||||||
/// Enable SMTP debugging (Know the risks!) |> DANGEROUS: Enabling this will output very detailed SMTP messages. This could contain sensitive information like passwords and usernames! Only enable this during troubleshooting!
|
/// Enable SMTP debugging (Know the risks!) |> DANGEROUS: Enabling this will output very detailed SMTP messages. This could contain sensitive information like passwords and usernames! Only enable this during troubleshooting!
|
||||||
smtp_debug: bool, false, def, false;
|
smtp_debug: bool, false, def, false;
|
||||||
/// Accept Invalid Certs (Know the risks!) |> DANGEROUS: Allow invalid certificates. This option introduces significant vulnerabilities to man-in-the-middle attacks!
|
/// Accept Invalid Certs (Know the risks!) |> DANGEROUS: Allow invalid certificates. This option introduces significant vulnerabilities to man-in-the-middle attacks!
|
||||||
@@ -899,12 +927,12 @@ fn validate_config(cfg: &ConfigItems) -> Result<(), Error> {
|
|||||||
err!("To use email 2FA as automatic fallback, email 2fa has to be enabled!");
|
err!("To use email 2FA as automatic fallback, email 2fa has to be enabled!");
|
||||||
}
|
}
|
||||||
|
|
||||||
// Check if the icon blacklist regex is valid
|
// Check if the HTTP request block regex is valid
|
||||||
if let Some(ref r) = cfg.icon_blacklist_regex {
|
if let Some(ref r) = cfg.http_request_block_regex {
|
||||||
let validate_regex = regex::Regex::new(r);
|
let validate_regex = regex::Regex::new(r);
|
||||||
match validate_regex {
|
match validate_regex {
|
||||||
Ok(_) => (),
|
Ok(_) => (),
|
||||||
Err(e) => err!(format!("`ICON_BLACKLIST_REGEX` is invalid: {e:#?}")),
|
Err(e) => err!(format!("`HTTP_REQUEST_BLOCK_REGEX` is invalid: {e:#?}")),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -984,6 +1012,11 @@ fn validate_config(cfg: &ConfigItems) -> Result<(), Error> {
|
|||||||
_ => {}
|
_ => {}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if cfg.increase_note_size_limit {
|
||||||
|
println!("[WARNING] Secure Note size limit is increased to 100_000!");
|
||||||
|
println!("[WARNING] This could cause issues with clients. Also exports will not work on Bitwarden servers!.");
|
||||||
|
}
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -1192,7 +1225,7 @@ impl Config {
|
|||||||
}
|
}
|
||||||
|
|
||||||
pub fn private_rsa_key(&self) -> String {
|
pub fn private_rsa_key(&self) -> String {
|
||||||
format!("{}.pem", CONFIG.rsa_key_filename())
|
format!("{}.pem", self.rsa_key_filename())
|
||||||
}
|
}
|
||||||
pub fn mail_enabled(&self) -> bool {
|
pub fn mail_enabled(&self) -> bool {
|
||||||
let inner = &self.inner.read().unwrap().config;
|
let inner = &self.inner.read().unwrap().config;
|
||||||
@@ -1223,12 +1256,8 @@ impl Config {
|
|||||||
token.is_some() && !token.unwrap().trim().is_empty()
|
token.is_some() && !token.unwrap().trim().is_empty()
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn render_template<T: serde::ser::Serialize>(
|
pub fn render_template<T: serde::ser::Serialize>(&self, name: &str, data: &T) -> Result<String, Error> {
|
||||||
&self,
|
if self.reload_templates() {
|
||||||
name: &str,
|
|
||||||
data: &T,
|
|
||||||
) -> Result<String, crate::error::Error> {
|
|
||||||
if CONFIG.reload_templates() {
|
|
||||||
warn!("RELOADING TEMPLATES");
|
warn!("RELOADING TEMPLATES");
|
||||||
let hb = load_templates(CONFIG.templates_folder());
|
let hb = load_templates(CONFIG.templates_folder());
|
||||||
hb.render(name, data).map_err(Into::into)
|
hb.render(name, data).map_err(Into::into)
|
||||||
@@ -1265,7 +1294,6 @@ where
|
|||||||
hb.set_strict_mode(true);
|
hb.set_strict_mode(true);
|
||||||
// Register helpers
|
// Register helpers
|
||||||
hb.register_helper("case", Box::new(case_helper));
|
hb.register_helper("case", Box::new(case_helper));
|
||||||
hb.register_helper("jsesc", Box::new(js_escape_helper));
|
|
||||||
hb.register_helper("to_json", Box::new(to_json));
|
hb.register_helper("to_json", Box::new(to_json));
|
||||||
|
|
||||||
macro_rules! reg {
|
macro_rules! reg {
|
||||||
@@ -1323,14 +1351,7 @@ where
|
|||||||
// And then load user templates to overwrite the defaults
|
// And then load user templates to overwrite the defaults
|
||||||
// Use .hbs extension for the files
|
// Use .hbs extension for the files
|
||||||
// Templates get registered with their relative name
|
// Templates get registered with their relative name
|
||||||
hb.register_templates_directory(
|
hb.register_templates_directory(path, DirectorySourceOptions::default()).unwrap();
|
||||||
path,
|
|
||||||
DirectorySourceOptions {
|
|
||||||
tpl_extension: ".hbs".to_owned(),
|
|
||||||
..Default::default()
|
|
||||||
},
|
|
||||||
)
|
|
||||||
.unwrap();
|
|
||||||
|
|
||||||
hb
|
hb
|
||||||
}
|
}
|
||||||
@@ -1353,32 +1374,6 @@ fn case_helper<'reg, 'rc>(
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
fn js_escape_helper<'reg, 'rc>(
|
|
||||||
h: &Helper<'rc>,
|
|
||||||
_r: &'reg Handlebars<'_>,
|
|
||||||
_ctx: &'rc Context,
|
|
||||||
_rc: &mut RenderContext<'reg, 'rc>,
|
|
||||||
out: &mut dyn Output,
|
|
||||||
) -> HelperResult {
|
|
||||||
let param =
|
|
||||||
h.param(0).ok_or_else(|| RenderErrorReason::Other(String::from("Param not found for helper \"jsesc\"")))?;
|
|
||||||
|
|
||||||
let no_quote = h.param(1).is_some();
|
|
||||||
|
|
||||||
let value = param
|
|
||||||
.value()
|
|
||||||
.as_str()
|
|
||||||
.ok_or_else(|| RenderErrorReason::Other(String::from("Param for helper \"jsesc\" is not a String")))?;
|
|
||||||
|
|
||||||
let mut escaped_value = value.replace('\\', "").replace('\'', "\\x22").replace('\"', "\\x27");
|
|
||||||
if !no_quote {
|
|
||||||
escaped_value = format!(""{escaped_value}"");
|
|
||||||
}
|
|
||||||
|
|
||||||
out.write(&escaped_value)?;
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
fn to_json<'reg, 'rc>(
|
fn to_json<'reg, 'rc>(
|
||||||
h: &Helper<'rc>,
|
h: &Helper<'rc>,
|
||||||
_r: &'reg Handlebars<'_>,
|
_r: &'reg Handlebars<'_>,
|
||||||
|
@@ -300,19 +300,17 @@ pub trait FromDb {
|
|||||||
|
|
||||||
impl<T: FromDb> FromDb for Vec<T> {
|
impl<T: FromDb> FromDb for Vec<T> {
|
||||||
type Output = Vec<T::Output>;
|
type Output = Vec<T::Output>;
|
||||||
#[allow(clippy::wrong_self_convention)]
|
|
||||||
#[inline(always)]
|
#[inline(always)]
|
||||||
fn from_db(self) -> Self::Output {
|
fn from_db(self) -> Self::Output {
|
||||||
self.into_iter().map(crate::db::FromDb::from_db).collect()
|
self.into_iter().map(FromDb::from_db).collect()
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<T: FromDb> FromDb for Option<T> {
|
impl<T: FromDb> FromDb for Option<T> {
|
||||||
type Output = Option<T::Output>;
|
type Output = Option<T::Output>;
|
||||||
#[allow(clippy::wrong_self_convention)]
|
|
||||||
#[inline(always)]
|
#[inline(always)]
|
||||||
fn from_db(self) -> Self::Output {
|
fn from_db(self) -> Self::Output {
|
||||||
self.map(crate::db::FromDb::from_db)
|
self.map(FromDb::from_db)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -368,23 +366,31 @@ pub mod models;
|
|||||||
|
|
||||||
/// Creates a back-up of the sqlite database
|
/// Creates a back-up of the sqlite database
|
||||||
/// MySQL/MariaDB and PostgreSQL are not supported.
|
/// MySQL/MariaDB and PostgreSQL are not supported.
|
||||||
pub async fn backup_database(conn: &mut DbConn) -> Result<(), Error> {
|
pub async fn backup_database(conn: &mut DbConn) -> Result<String, Error> {
|
||||||
db_run! {@raw conn:
|
db_run! {@raw conn:
|
||||||
postgresql, mysql {
|
postgresql, mysql {
|
||||||
let _ = conn;
|
let _ = conn;
|
||||||
err!("PostgreSQL and MySQL/MariaDB do not support this backup feature");
|
err!("PostgreSQL and MySQL/MariaDB do not support this backup feature");
|
||||||
}
|
}
|
||||||
sqlite {
|
sqlite {
|
||||||
use std::path::Path;
|
backup_sqlite_database(conn)
|
||||||
let db_url = CONFIG.database_url();
|
|
||||||
let db_path = Path::new(&db_url).parent().unwrap().to_string_lossy();
|
|
||||||
let file_date = chrono::Utc::now().format("%Y%m%d_%H%M%S").to_string();
|
|
||||||
diesel::sql_query(format!("VACUUM INTO '{db_path}/db_{file_date}.sqlite3'")).execute(conn)?;
|
|
||||||
Ok(())
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[cfg(sqlite)]
|
||||||
|
pub fn backup_sqlite_database(conn: &mut diesel::sqlite::SqliteConnection) -> Result<String, Error> {
|
||||||
|
use diesel::RunQueryDsl;
|
||||||
|
let db_url = CONFIG.database_url();
|
||||||
|
let db_path = std::path::Path::new(&db_url).parent().unwrap();
|
||||||
|
let backup_file = db_path
|
||||||
|
.join(format!("db_{}.sqlite3", chrono::Utc::now().format("%Y%m%d_%H%M%S")))
|
||||||
|
.to_string_lossy()
|
||||||
|
.into_owned();
|
||||||
|
diesel::sql_query(format!("VACUUM INTO '{backup_file}'")).execute(conn)?;
|
||||||
|
Ok(backup_file)
|
||||||
|
}
|
||||||
|
|
||||||
/// Get the SQL Server version
|
/// Get the SQL Server version
|
||||||
pub async fn get_sql_server_version(conn: &mut DbConn) -> String {
|
pub async fn get_sql_server_version(conn: &mut DbConn) -> String {
|
||||||
db_run! {@raw conn:
|
db_run! {@raw conn:
|
||||||
|
@@ -1,6 +1,6 @@
|
|||||||
use crate::util::LowerCase;
|
use crate::util::LowerCase;
|
||||||
use crate::CONFIG;
|
use crate::CONFIG;
|
||||||
use chrono::{NaiveDateTime, TimeDelta, Utc};
|
use chrono::{DateTime, NaiveDateTime, TimeDelta, Utc};
|
||||||
use serde_json::Value;
|
use serde_json::Value;
|
||||||
|
|
||||||
use super::{
|
use super::{
|
||||||
@@ -79,21 +79,39 @@ impl Cipher {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn validate_notes(cipher_data: &[CipherData]) -> EmptyResult {
|
pub fn validate_cipher_data(cipher_data: &[CipherData]) -> EmptyResult {
|
||||||
let mut validation_errors = serde_json::Map::new();
|
let mut validation_errors = serde_json::Map::new();
|
||||||
|
let max_note_size = CONFIG._max_note_size();
|
||||||
|
let max_note_size_msg =
|
||||||
|
format!("The field Notes exceeds the maximum encrypted value length of {} characters.", &max_note_size);
|
||||||
for (index, cipher) in cipher_data.iter().enumerate() {
|
for (index, cipher) in cipher_data.iter().enumerate() {
|
||||||
|
// Validate the note size and if it is exceeded return a warning
|
||||||
if let Some(note) = &cipher.notes {
|
if let Some(note) = &cipher.notes {
|
||||||
if note.len() > 10_000 {
|
if note.len() > max_note_size {
|
||||||
validation_errors.insert(
|
validation_errors
|
||||||
format!("Ciphers[{index}].Notes"),
|
.insert(format!("Ciphers[{index}].Notes"), serde_json::to_value([&max_note_size_msg]).unwrap());
|
||||||
serde_json::to_value([
|
}
|
||||||
"The field Notes exceeds the maximum encrypted value length of 10000 characters.",
|
}
|
||||||
])
|
|
||||||
.unwrap(),
|
// Validate the password history if it contains `null` values and if so, return a warning
|
||||||
);
|
if let Some(Value::Array(password_history)) = &cipher.password_history {
|
||||||
|
for pwh in password_history {
|
||||||
|
if let Value::Object(pwo) = pwh {
|
||||||
|
if pwo.get("password").is_some_and(|p| !p.is_string()) {
|
||||||
|
validation_errors.insert(
|
||||||
|
format!("Ciphers[{index}].Notes"),
|
||||||
|
serde_json::to_value([
|
||||||
|
"The password history contains a `null` value. Only strings are allowed.",
|
||||||
|
])
|
||||||
|
.unwrap(),
|
||||||
|
);
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if !validation_errors.is_empty() {
|
if !validation_errors.is_empty() {
|
||||||
let err_json = json!({
|
let err_json = json!({
|
||||||
"message": "The model state is invalid.",
|
"message": "The model state is invalid.",
|
||||||
@@ -155,27 +173,48 @@ impl Cipher {
|
|||||||
.as_ref()
|
.as_ref()
|
||||||
.and_then(|s| {
|
.and_then(|s| {
|
||||||
serde_json::from_str::<Vec<LowerCase<Value>>>(s)
|
serde_json::from_str::<Vec<LowerCase<Value>>>(s)
|
||||||
.inspect_err(|e| warn!("Error parsing fields {:?}", e))
|
.inspect_err(|e| warn!("Error parsing fields {e:?} for {}", self.uuid))
|
||||||
.ok()
|
|
||||||
})
|
|
||||||
.map(|d| d.into_iter().map(|d| d.data).collect())
|
|
||||||
.unwrap_or_default();
|
|
||||||
let password_history_json: Vec<_> = self
|
|
||||||
.password_history
|
|
||||||
.as_ref()
|
|
||||||
.and_then(|s| {
|
|
||||||
serde_json::from_str::<Vec<LowerCase<Value>>>(s)
|
|
||||||
.inspect_err(|e| warn!("Error parsing password history {:?}", e))
|
|
||||||
.ok()
|
.ok()
|
||||||
})
|
})
|
||||||
.map(|d| d.into_iter().map(|d| d.data).collect())
|
.map(|d| d.into_iter().map(|d| d.data).collect())
|
||||||
.unwrap_or_default();
|
.unwrap_or_default();
|
||||||
|
|
||||||
|
let password_history_json: Vec<_> = self
|
||||||
|
.password_history
|
||||||
|
.as_ref()
|
||||||
|
.and_then(|s| {
|
||||||
|
serde_json::from_str::<Vec<LowerCase<Value>>>(s)
|
||||||
|
.inspect_err(|e| warn!("Error parsing password history {e:?} for {}", self.uuid))
|
||||||
|
.ok()
|
||||||
|
})
|
||||||
|
.map(|d| {
|
||||||
|
// Check every password history item if they are valid and return it.
|
||||||
|
// If a password field has the type `null` skip it, it breaks newer Bitwarden clients
|
||||||
|
// A second check is done to verify the lastUsedDate exists and is a valid DateTime string, if not the epoch start time will be used
|
||||||
|
d.into_iter()
|
||||||
|
.filter_map(|d| match d.data.get("password") {
|
||||||
|
Some(p) if p.is_string() => Some(d.data),
|
||||||
|
_ => None,
|
||||||
|
})
|
||||||
|
.map(|d| match d.get("lastUsedDate").and_then(|l| l.as_str()) {
|
||||||
|
Some(l) if DateTime::parse_from_rfc3339(l).is_ok() => d,
|
||||||
|
_ => {
|
||||||
|
let mut d = d;
|
||||||
|
d["lastUsedDate"] = json!("1970-01-01T00:00:00.000Z");
|
||||||
|
d
|
||||||
|
}
|
||||||
|
})
|
||||||
|
.collect()
|
||||||
|
})
|
||||||
|
.unwrap_or_default();
|
||||||
|
|
||||||
// Get the type_data or a default to an empty json object '{}'.
|
// Get the type_data or a default to an empty json object '{}'.
|
||||||
// If not passing an empty object, mobile clients will crash.
|
// If not passing an empty object, mobile clients will crash.
|
||||||
let mut type_data_json = serde_json::from_str::<LowerCase<Value>>(&self.data)
|
let mut type_data_json =
|
||||||
.map(|d| d.data)
|
serde_json::from_str::<LowerCase<Value>>(&self.data).map(|d| d.data).unwrap_or_else(|_| {
|
||||||
.unwrap_or_else(|_| Value::Object(serde_json::Map::new()));
|
warn!("Error parsing data field for {}", self.uuid);
|
||||||
|
Value::Object(serde_json::Map::new())
|
||||||
|
});
|
||||||
|
|
||||||
// NOTE: This was marked as *Backwards Compatibility Code*, but as of January 2021 this is still being used by upstream
|
// NOTE: This was marked as *Backwards Compatibility Code*, but as of January 2021 this is still being used by upstream
|
||||||
// Set the first element of the Uris array as Uri, this is needed several (mobile) clients.
|
// Set the first element of the Uris array as Uri, this is needed several (mobile) clients.
|
||||||
@@ -189,10 +228,15 @@ impl Cipher {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Fix secure note issues when data is `{}`
|
// Fix secure note issues when data is invalid
|
||||||
// This breaks at least the native mobile clients
|
// This breaks at least the native mobile clients
|
||||||
if self.atype == 2 && (self.data.eq("{}") || self.data.to_ascii_lowercase().eq("{\"type\":null}")) {
|
if self.atype == 2 {
|
||||||
type_data_json = json!({"type": 0});
|
match type_data_json {
|
||||||
|
Value::Object(ref t) if t.get("type").is_some_and(|t| t.is_number()) => {}
|
||||||
|
_ => {
|
||||||
|
type_data_json = json!({"type": 0});
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Clone the type_data and add some default value.
|
// Clone the type_data and add some default value.
|
||||||
@@ -620,6 +664,17 @@ impl Cipher {
|
|||||||
}}
|
}}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
pub async fn find_by_uuid_and_org(cipher_uuid: &str, org_uuid: &str, conn: &mut DbConn) -> Option<Self> {
|
||||||
|
db_run! {conn: {
|
||||||
|
ciphers::table
|
||||||
|
.filter(ciphers::uuid.eq(cipher_uuid))
|
||||||
|
.filter(ciphers::organization_uuid.eq(org_uuid))
|
||||||
|
.first::<CipherDb>(conn)
|
||||||
|
.ok()
|
||||||
|
.from_db()
|
||||||
|
}}
|
||||||
|
}
|
||||||
|
|
||||||
// Find all ciphers accessible or visible to the specified user.
|
// Find all ciphers accessible or visible to the specified user.
|
||||||
//
|
//
|
||||||
// "Accessible" means the user has read access to the cipher, either via
|
// "Accessible" means the user has read access to the cipher, either via
|
||||||
|
@@ -78,28 +78,46 @@ impl Collection {
|
|||||||
cipher_sync_data: Option<&crate::api::core::CipherSyncData>,
|
cipher_sync_data: Option<&crate::api::core::CipherSyncData>,
|
||||||
conn: &mut DbConn,
|
conn: &mut DbConn,
|
||||||
) -> Value {
|
) -> Value {
|
||||||
let (read_only, hide_passwords) = if let Some(cipher_sync_data) = cipher_sync_data {
|
let (read_only, hide_passwords, can_manage) = if let Some(cipher_sync_data) = cipher_sync_data {
|
||||||
match cipher_sync_data.user_organizations.get(&self.org_uuid) {
|
match cipher_sync_data.user_organizations.get(&self.org_uuid) {
|
||||||
Some(uo) if uo.has_full_access() => (false, false),
|
// Only for Manager types Bitwarden returns true for the can_manage option
|
||||||
Some(_) => {
|
// Owners and Admins always have false, but they can manage all collections anyway
|
||||||
|
Some(uo) if uo.has_full_access() => (false, false, uo.atype == UserOrgType::Manager),
|
||||||
|
Some(uo) => {
|
||||||
|
// Only let a manager manage collections when the have full read/write access
|
||||||
|
let is_manager = uo.atype == UserOrgType::Manager;
|
||||||
if let Some(uc) = cipher_sync_data.user_collections.get(&self.uuid) {
|
if let Some(uc) = cipher_sync_data.user_collections.get(&self.uuid) {
|
||||||
(uc.read_only, uc.hide_passwords)
|
(uc.read_only, uc.hide_passwords, is_manager && !uc.read_only && !uc.hide_passwords)
|
||||||
} else if let Some(cg) = cipher_sync_data.user_collections_groups.get(&self.uuid) {
|
} else if let Some(cg) = cipher_sync_data.user_collections_groups.get(&self.uuid) {
|
||||||
(cg.read_only, cg.hide_passwords)
|
(cg.read_only, cg.hide_passwords, is_manager && !cg.read_only && !cg.hide_passwords)
|
||||||
} else {
|
} else {
|
||||||
(false, false)
|
(false, false, false)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
_ => (true, true),
|
_ => (true, true, false),
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
(!self.is_writable_by_user(user_uuid, conn).await, self.hide_passwords_for_user(user_uuid, conn).await)
|
match UserOrganization::find_confirmed_by_user_and_org(user_uuid, &self.org_uuid, conn).await {
|
||||||
|
Some(ou) if ou.has_full_access() => (false, false, ou.atype == UserOrgType::Manager),
|
||||||
|
Some(ou) => {
|
||||||
|
let is_manager = ou.atype == UserOrgType::Manager;
|
||||||
|
let read_only = !self.is_writable_by_user(user_uuid, conn).await;
|
||||||
|
let hide_passwords = self.hide_passwords_for_user(user_uuid, conn).await;
|
||||||
|
(read_only, hide_passwords, is_manager && !read_only && !hide_passwords)
|
||||||
|
}
|
||||||
|
_ => (
|
||||||
|
!self.is_writable_by_user(user_uuid, conn).await,
|
||||||
|
self.hide_passwords_for_user(user_uuid, conn).await,
|
||||||
|
false,
|
||||||
|
),
|
||||||
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
let mut json_object = self.to_json();
|
let mut json_object = self.to_json();
|
||||||
json_object["object"] = json!("collectionDetails");
|
json_object["object"] = json!("collectionDetails");
|
||||||
json_object["readOnly"] = json!(read_only);
|
json_object["readOnly"] = json!(read_only);
|
||||||
json_object["hidePasswords"] = json!(hide_passwords);
|
json_object["hidePasswords"] = json!(hide_passwords);
|
||||||
|
json_object["manage"] = json!(can_manage);
|
||||||
json_object
|
json_object
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@@ -16,7 +16,7 @@ db_object! {
|
|||||||
pub user_uuid: String,
|
pub user_uuid: String,
|
||||||
|
|
||||||
pub name: String,
|
pub name: String,
|
||||||
pub atype: i32, // https://github.com/bitwarden/server/blob/master/src/Core/Enums/DeviceType.cs
|
pub atype: i32, // https://github.com/bitwarden/server/blob/dcc199bcce4aa2d5621f6fab80f1b49d8b143418/src/Core/Enums/DeviceType.cs
|
||||||
pub push_uuid: Option<String>,
|
pub push_uuid: Option<String>,
|
||||||
pub push_token: Option<String>,
|
pub push_token: Option<String>,
|
||||||
|
|
||||||
@@ -267,6 +267,9 @@ pub enum DeviceType {
|
|||||||
SafariExtension = 20,
|
SafariExtension = 20,
|
||||||
Sdk = 21,
|
Sdk = 21,
|
||||||
Server = 22,
|
Server = 22,
|
||||||
|
WindowsCLI = 23,
|
||||||
|
MacOsCLI = 24,
|
||||||
|
LinuxCLI = 25,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl fmt::Display for DeviceType {
|
impl fmt::Display for DeviceType {
|
||||||
@@ -278,23 +281,26 @@ impl fmt::Display for DeviceType {
|
|||||||
DeviceType::FirefoxExtension => write!(f, "Firefox Extension"),
|
DeviceType::FirefoxExtension => write!(f, "Firefox Extension"),
|
||||||
DeviceType::OperaExtension => write!(f, "Opera Extension"),
|
DeviceType::OperaExtension => write!(f, "Opera Extension"),
|
||||||
DeviceType::EdgeExtension => write!(f, "Edge Extension"),
|
DeviceType::EdgeExtension => write!(f, "Edge Extension"),
|
||||||
DeviceType::WindowsDesktop => write!(f, "Windows Desktop"),
|
DeviceType::WindowsDesktop => write!(f, "Windows"),
|
||||||
DeviceType::MacOsDesktop => write!(f, "MacOS Desktop"),
|
DeviceType::MacOsDesktop => write!(f, "macOS"),
|
||||||
DeviceType::LinuxDesktop => write!(f, "Linux Desktop"),
|
DeviceType::LinuxDesktop => write!(f, "Linux"),
|
||||||
DeviceType::ChromeBrowser => write!(f, "Chrome Browser"),
|
DeviceType::ChromeBrowser => write!(f, "Chrome"),
|
||||||
DeviceType::FirefoxBrowser => write!(f, "Firefox Browser"),
|
DeviceType::FirefoxBrowser => write!(f, "Firefox"),
|
||||||
DeviceType::OperaBrowser => write!(f, "Opera Browser"),
|
DeviceType::OperaBrowser => write!(f, "Opera"),
|
||||||
DeviceType::EdgeBrowser => write!(f, "Edge Browser"),
|
DeviceType::EdgeBrowser => write!(f, "Edge"),
|
||||||
DeviceType::IEBrowser => write!(f, "Internet Explorer"),
|
DeviceType::IEBrowser => write!(f, "Internet Explorer"),
|
||||||
DeviceType::UnknownBrowser => write!(f, "Unknown Browser"),
|
DeviceType::UnknownBrowser => write!(f, "Unknown Browser"),
|
||||||
DeviceType::AndroidAmazon => write!(f, "Android Amazon"),
|
DeviceType::AndroidAmazon => write!(f, "Android"),
|
||||||
DeviceType::Uwp => write!(f, "UWP"),
|
DeviceType::Uwp => write!(f, "UWP"),
|
||||||
DeviceType::SafariBrowser => write!(f, "Safari Browser"),
|
DeviceType::SafariBrowser => write!(f, "Safari"),
|
||||||
DeviceType::VivaldiBrowser => write!(f, "Vivaldi Browser"),
|
DeviceType::VivaldiBrowser => write!(f, "Vivaldi"),
|
||||||
DeviceType::VivaldiExtension => write!(f, "Vivaldi Extension"),
|
DeviceType::VivaldiExtension => write!(f, "Vivaldi Extension"),
|
||||||
DeviceType::SafariExtension => write!(f, "Safari Extension"),
|
DeviceType::SafariExtension => write!(f, "Safari Extension"),
|
||||||
DeviceType::Sdk => write!(f, "SDK"),
|
DeviceType::Sdk => write!(f, "SDK"),
|
||||||
DeviceType::Server => write!(f, "Server"),
|
DeviceType::Server => write!(f, "Server"),
|
||||||
|
DeviceType::WindowsCLI => write!(f, "Windows CLI"),
|
||||||
|
DeviceType::MacOsCLI => write!(f, "macOS CLI"),
|
||||||
|
DeviceType::LinuxCLI => write!(f, "Linux CLI"),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -325,6 +331,9 @@ impl DeviceType {
|
|||||||
20 => DeviceType::SafariExtension,
|
20 => DeviceType::SafariExtension,
|
||||||
21 => DeviceType::Sdk,
|
21 => DeviceType::Sdk,
|
||||||
22 => DeviceType::Server,
|
22 => DeviceType::Server,
|
||||||
|
23 => DeviceType::WindowsCLI,
|
||||||
|
24 => DeviceType::MacOsCLI,
|
||||||
|
25 => DeviceType::LinuxCLI,
|
||||||
_ => DeviceType::UnknownBrowser,
|
_ => DeviceType::UnknownBrowser,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@@ -89,7 +89,7 @@ impl EmergencyAccess {
|
|||||||
Some(user) => user,
|
Some(user) => user,
|
||||||
None => {
|
None => {
|
||||||
// remove outstanding invitations which should not exist
|
// remove outstanding invitations which should not exist
|
||||||
let _ = Self::delete_all_by_grantee_email(email, conn).await;
|
Self::delete_all_by_grantee_email(email, conn).await.ok();
|
||||||
return None;
|
return None;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@@ -1,3 +1,7 @@
|
|||||||
|
use super::{User, UserOrgType, UserOrganization};
|
||||||
|
use crate::api::EmptyResult;
|
||||||
|
use crate::db::DbConn;
|
||||||
|
use crate::error::MapResult;
|
||||||
use chrono::{NaiveDateTime, Utc};
|
use chrono::{NaiveDateTime, Utc};
|
||||||
use serde_json::Value;
|
use serde_json::Value;
|
||||||
|
|
||||||
@@ -69,7 +73,7 @@ impl Group {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
pub async fn to_json_details(&self, conn: &mut DbConn) -> Value {
|
pub async fn to_json_details(&self, user_org_type: &i32, conn: &mut DbConn) -> Value {
|
||||||
let collections_groups: Vec<Value> = CollectionGroup::find_by_group(&self.uuid, conn)
|
let collections_groups: Vec<Value> = CollectionGroup::find_by_group(&self.uuid, conn)
|
||||||
.await
|
.await
|
||||||
.iter()
|
.iter()
|
||||||
@@ -77,7 +81,8 @@ impl Group {
|
|||||||
json!({
|
json!({
|
||||||
"id": entry.collections_uuid,
|
"id": entry.collections_uuid,
|
||||||
"readOnly": entry.read_only,
|
"readOnly": entry.read_only,
|
||||||
"hidePasswords": entry.hide_passwords
|
"hidePasswords": entry.hide_passwords,
|
||||||
|
"manage": *user_org_type == UserOrgType::Manager && !entry.read_only && !entry.hide_passwords
|
||||||
})
|
})
|
||||||
})
|
})
|
||||||
.collect();
|
.collect();
|
||||||
@@ -122,13 +127,6 @@ impl GroupUser {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
use crate::db::DbConn;
|
|
||||||
|
|
||||||
use crate::api::EmptyResult;
|
|
||||||
use crate::error::MapResult;
|
|
||||||
|
|
||||||
use super::{User, UserOrganization};
|
|
||||||
|
|
||||||
/// Database methods
|
/// Database methods
|
||||||
impl Group {
|
impl Group {
|
||||||
pub async fn save(&mut self, conn: &mut DbConn) -> EmptyResult {
|
pub async fn save(&mut self, conn: &mut DbConn) -> EmptyResult {
|
||||||
|
@@ -12,6 +12,7 @@ mod org_policy;
|
|||||||
mod organization;
|
mod organization;
|
||||||
mod send;
|
mod send;
|
||||||
mod two_factor;
|
mod two_factor;
|
||||||
|
mod two_factor_duo_context;
|
||||||
mod two_factor_incomplete;
|
mod two_factor_incomplete;
|
||||||
mod user;
|
mod user;
|
||||||
|
|
||||||
@@ -29,5 +30,6 @@ pub use self::org_policy::{OrgPolicy, OrgPolicyErr, OrgPolicyType};
|
|||||||
pub use self::organization::{Organization, OrganizationApiKey, UserOrgStatus, UserOrgType, UserOrganization};
|
pub use self::organization::{Organization, OrganizationApiKey, UserOrgStatus, UserOrgType, UserOrganization};
|
||||||
pub use self::send::{Send, SendType};
|
pub use self::send::{Send, SendType};
|
||||||
pub use self::two_factor::{TwoFactor, TwoFactorType};
|
pub use self::two_factor::{TwoFactor, TwoFactorType};
|
||||||
|
pub use self::two_factor_duo_context::TwoFactorDuoContext;
|
||||||
pub use self::two_factor_incomplete::TwoFactorIncomplete;
|
pub use self::two_factor_incomplete::TwoFactorIncomplete;
|
||||||
pub use self::user::{Invitation, User, UserKdfType, UserStampException};
|
pub use self::user::{Invitation, User, UserKdfType, UserStampException};
|
||||||
|
@@ -342,9 +342,11 @@ impl OrgPolicy {
|
|||||||
false
|
false
|
||||||
}
|
}
|
||||||
|
|
||||||
pub async fn is_enabled_by_org(org_uuid: &str, policy_type: OrgPolicyType, conn: &mut DbConn) -> bool {
|
pub async fn is_enabled_for_member(org_user_uuid: &str, policy_type: OrgPolicyType, conn: &mut DbConn) -> bool {
|
||||||
if let Some(policy) = OrgPolicy::find_by_org_and_type(org_uuid, policy_type, conn).await {
|
if let Some(membership) = UserOrganization::find_by_uuid(org_user_uuid, conn).await {
|
||||||
return policy.enabled;
|
if let Some(policy) = OrgPolicy::find_by_org_and_type(&membership.org_uuid, policy_type, conn).await {
|
||||||
|
return policy.enabled;
|
||||||
|
}
|
||||||
}
|
}
|
||||||
false
|
false
|
||||||
}
|
}
|
||||||
|
@@ -1,9 +1,13 @@
|
|||||||
use chrono::{NaiveDateTime, Utc};
|
use chrono::{NaiveDateTime, Utc};
|
||||||
use num_traits::FromPrimitive;
|
use num_traits::FromPrimitive;
|
||||||
use serde_json::Value;
|
use serde_json::Value;
|
||||||
use std::cmp::Ordering;
|
use std::{
|
||||||
|
cmp::Ordering,
|
||||||
|
collections::{HashMap, HashSet},
|
||||||
|
};
|
||||||
|
|
||||||
use super::{CollectionUser, Group, GroupUser, OrgPolicy, OrgPolicyType, TwoFactor, User};
|
use super::{CollectionUser, Group, GroupUser, OrgPolicy, OrgPolicyType, TwoFactor, User};
|
||||||
|
use crate::db::models::{Collection, CollectionGroup};
|
||||||
use crate::CONFIG;
|
use crate::CONFIG;
|
||||||
|
|
||||||
db_object! {
|
db_object! {
|
||||||
@@ -112,7 +116,7 @@ impl PartialOrd<i32> for UserOrgType {
|
|||||||
}
|
}
|
||||||
|
|
||||||
fn ge(&self, other: &i32) -> bool {
|
fn ge(&self, other: &i32) -> bool {
|
||||||
matches!(self.partial_cmp(other), Some(Ordering::Greater) | Some(Ordering::Equal))
|
matches!(self.partial_cmp(other), Some(Ordering::Greater | Ordering::Equal))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -135,7 +139,7 @@ impl PartialOrd<UserOrgType> for i32 {
|
|||||||
}
|
}
|
||||||
|
|
||||||
fn le(&self, other: &UserOrgType) -> bool {
|
fn le(&self, other: &UserOrgType) -> bool {
|
||||||
matches!(self.partial_cmp(other), Some(Ordering::Less) | Some(Ordering::Equal) | None)
|
matches!(self.partial_cmp(other), Some(Ordering::Less | Ordering::Equal) | None)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -156,11 +160,12 @@ impl Organization {
|
|||||||
"id": self.uuid,
|
"id": self.uuid,
|
||||||
"identifier": null, // not supported by us
|
"identifier": null, // not supported by us
|
||||||
"name": self.name,
|
"name": self.name,
|
||||||
"seats": 10, // The value doesn't matter, we don't check server-side
|
"seats": null,
|
||||||
// "maxAutoscaleSeats": null, // The value doesn't matter, we don't check server-side
|
"maxAutoscaleSeats": null,
|
||||||
"maxCollections": 10, // The value doesn't matter, we don't check server-side
|
"maxCollections": null,
|
||||||
"maxStorageGb": 10, // The value doesn't matter, we don't check server-side
|
"maxStorageGb": i16::MAX, // The value doesn't matter, we don't check server-side
|
||||||
"use2fa": true,
|
"use2fa": true,
|
||||||
|
"useCustomPermissions": false,
|
||||||
"useDirectory": false, // Is supported, but this value isn't checked anywhere (yet)
|
"useDirectory": false, // Is supported, but this value isn't checked anywhere (yet)
|
||||||
"useEvents": CONFIG.org_events_enabled(),
|
"useEvents": CONFIG.org_events_enabled(),
|
||||||
"useGroups": CONFIG.org_groups_enabled(),
|
"useGroups": CONFIG.org_groups_enabled(),
|
||||||
@@ -182,8 +187,7 @@ impl Organization {
|
|||||||
"businessTaxNumber": null,
|
"businessTaxNumber": null,
|
||||||
|
|
||||||
"billingEmail": self.billing_email,
|
"billingEmail": self.billing_email,
|
||||||
"plan": "TeamsAnnually",
|
"planType": 6, // Custom plan
|
||||||
"planType": 5, // TeamsAnnually plan
|
|
||||||
"usersGetPremium": true,
|
"usersGetPremium": true,
|
||||||
"object": "organization",
|
"object": "organization",
|
||||||
})
|
})
|
||||||
@@ -369,8 +373,9 @@ impl UserOrganization {
|
|||||||
"id": self.org_uuid,
|
"id": self.org_uuid,
|
||||||
"identifier": null, // Not supported
|
"identifier": null, // Not supported
|
||||||
"name": org.name,
|
"name": org.name,
|
||||||
"seats": 10, // The value doesn't matter, we don't check server-side
|
"seats": null,
|
||||||
"maxCollections": 10, // The value doesn't matter, we don't check server-side
|
"maxAutoscaleSeats": null,
|
||||||
|
"maxCollections": null,
|
||||||
"usersGetPremium": true,
|
"usersGetPremium": true,
|
||||||
"use2fa": true,
|
"use2fa": true,
|
||||||
"useDirectory": false, // Is supported, but this value isn't checked anywhere (yet)
|
"useDirectory": false, // Is supported, but this value isn't checked anywhere (yet)
|
||||||
@@ -392,12 +397,14 @@ impl UserOrganization {
|
|||||||
"useCustomPermissions": false,
|
"useCustomPermissions": false,
|
||||||
"useActivateAutofillPolicy": false,
|
"useActivateAutofillPolicy": false,
|
||||||
|
|
||||||
|
"organizationUserId": self.uuid,
|
||||||
"providerId": null,
|
"providerId": null,
|
||||||
"providerName": null,
|
"providerName": null,
|
||||||
"providerType": null,
|
"providerType": null,
|
||||||
"familySponsorshipFriendlyName": null,
|
"familySponsorshipFriendlyName": null,
|
||||||
"familySponsorshipAvailable": false,
|
"familySponsorshipAvailable": false,
|
||||||
"planProductType": 0,
|
"planProductType": 3,
|
||||||
|
"productTierType": 3, // Enterprise tier
|
||||||
"keyConnectorEnabled": false,
|
"keyConnectorEnabled": false,
|
||||||
"keyConnectorUrl": null,
|
"keyConnectorUrl": null,
|
||||||
"familySponsorshipLastSyncDate": null,
|
"familySponsorshipLastSyncDate": null,
|
||||||
@@ -410,7 +417,7 @@ impl UserOrganization {
|
|||||||
|
|
||||||
"permissions": permissions,
|
"permissions": permissions,
|
||||||
|
|
||||||
"maxStorageGb": 10, // The value doesn't matter, we don't check server-side
|
"maxStorageGb": i16::MAX, // The value doesn't matter, we don't check server-side
|
||||||
|
|
||||||
// These are per user
|
// These are per user
|
||||||
"userId": self.user_uuid,
|
"userId": self.user_uuid,
|
||||||
@@ -450,15 +457,47 @@ impl UserOrganization {
|
|||||||
};
|
};
|
||||||
|
|
||||||
let collections: Vec<Value> = if include_collections {
|
let collections: Vec<Value> = if include_collections {
|
||||||
CollectionUser::find_by_organization_and_user_uuid(&self.org_uuid, &self.user_uuid, conn)
|
// Get all collections for the user here already to prevent more queries
|
||||||
|
let cu: HashMap<String, CollectionUser> =
|
||||||
|
CollectionUser::find_by_organization_and_user_uuid(&self.org_uuid, &self.user_uuid, conn)
|
||||||
|
.await
|
||||||
|
.into_iter()
|
||||||
|
.map(|cu| (cu.collection_uuid.clone(), cu))
|
||||||
|
.collect();
|
||||||
|
|
||||||
|
// Get all collection groups for this user to prevent there inclusion
|
||||||
|
let cg: HashSet<String> = CollectionGroup::find_by_user(&self.user_uuid, conn)
|
||||||
.await
|
.await
|
||||||
.iter()
|
.into_iter()
|
||||||
.map(|cu| {
|
.map(|cg| cg.collections_uuid)
|
||||||
json!({
|
.collect();
|
||||||
"id": cu.collection_uuid,
|
|
||||||
"readOnly": cu.read_only,
|
Collection::find_by_organization_and_user_uuid(&self.org_uuid, &self.user_uuid, conn)
|
||||||
"hidePasswords": cu.hide_passwords,
|
.await
|
||||||
})
|
.into_iter()
|
||||||
|
.filter_map(|c| {
|
||||||
|
let (read_only, hide_passwords, can_manage) = if self.has_full_access() {
|
||||||
|
(false, false, self.atype == UserOrgType::Manager)
|
||||||
|
} else if let Some(cu) = cu.get(&c.uuid) {
|
||||||
|
(
|
||||||
|
cu.read_only,
|
||||||
|
cu.hide_passwords,
|
||||||
|
self.atype == UserOrgType::Manager && !cu.read_only && !cu.hide_passwords,
|
||||||
|
)
|
||||||
|
// If previous checks failed it might be that this user has access via a group, but we should not return those elements here
|
||||||
|
// Those are returned via a special group endpoint
|
||||||
|
} else if cg.contains(&c.uuid) {
|
||||||
|
return None;
|
||||||
|
} else {
|
||||||
|
(true, true, false)
|
||||||
|
};
|
||||||
|
|
||||||
|
Some(json!({
|
||||||
|
"id": c.uuid,
|
||||||
|
"readOnly": read_only,
|
||||||
|
"hidePasswords": hide_passwords,
|
||||||
|
"manage": can_manage,
|
||||||
|
}))
|
||||||
})
|
})
|
||||||
.collect()
|
.collect()
|
||||||
} else {
|
} else {
|
||||||
@@ -471,6 +510,7 @@ impl UserOrganization {
|
|||||||
"name": user.name,
|
"name": user.name,
|
||||||
"email": user.email,
|
"email": user.email,
|
||||||
"externalId": self.external_id,
|
"externalId": self.external_id,
|
||||||
|
"avatarColor": user.avatar_color,
|
||||||
"groups": groups,
|
"groups": groups,
|
||||||
"collections": collections,
|
"collections": collections,
|
||||||
|
|
||||||
@@ -592,7 +632,7 @@ impl UserOrganization {
|
|||||||
}
|
}
|
||||||
|
|
||||||
pub async fn find_by_email_and_org(email: &str, org_id: &str, conn: &mut DbConn) -> Option<UserOrganization> {
|
pub async fn find_by_email_and_org(email: &str, org_id: &str, conn: &mut DbConn) -> Option<UserOrganization> {
|
||||||
if let Some(user) = super::User::find_by_mail(email, conn).await {
|
if let Some(user) = User::find_by_mail(email, conn).await {
|
||||||
if let Some(user_org) = UserOrganization::find_by_user_and_org(&user.uuid, org_id, conn).await {
|
if let Some(user_org) = UserOrganization::find_by_user_and_org(&user.uuid, org_id, conn).await {
|
||||||
return Some(user_org);
|
return Some(user_org);
|
||||||
}
|
}
|
||||||
@@ -734,6 +774,19 @@ impl UserOrganization {
|
|||||||
}}
|
}}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
pub async fn find_confirmed_by_user_and_org(user_uuid: &str, org_uuid: &str, conn: &mut DbConn) -> Option<Self> {
|
||||||
|
db_run! { conn: {
|
||||||
|
users_organizations::table
|
||||||
|
.filter(users_organizations::user_uuid.eq(user_uuid))
|
||||||
|
.filter(users_organizations::org_uuid.eq(org_uuid))
|
||||||
|
.filter(
|
||||||
|
users_organizations::status.eq(UserOrgStatus::Confirmed as i32)
|
||||||
|
)
|
||||||
|
.first::<UserOrganizationDb>(conn)
|
||||||
|
.ok().from_db()
|
||||||
|
}}
|
||||||
|
}
|
||||||
|
|
||||||
pub async fn find_by_user(user_uuid: &str, conn: &mut DbConn) -> Vec<Self> {
|
pub async fn find_by_user(user_uuid: &str, conn: &mut DbConn) -> Vec<Self> {
|
||||||
db_run! { conn: {
|
db_run! { conn: {
|
||||||
users_organizations::table
|
users_organizations::table
|
||||||
|
84
src/db/models/two_factor_duo_context.rs
Normal file
84
src/db/models/two_factor_duo_context.rs
Normal file
@@ -0,0 +1,84 @@
|
|||||||
|
use chrono::Utc;
|
||||||
|
|
||||||
|
use crate::{api::EmptyResult, db::DbConn, error::MapResult};
|
||||||
|
|
||||||
|
db_object! {
|
||||||
|
#[derive(Identifiable, Queryable, Insertable, AsChangeset)]
|
||||||
|
#[diesel(table_name = twofactor_duo_ctx)]
|
||||||
|
#[diesel(primary_key(state))]
|
||||||
|
pub struct TwoFactorDuoContext {
|
||||||
|
pub state: String,
|
||||||
|
pub user_email: String,
|
||||||
|
pub nonce: String,
|
||||||
|
pub exp: i64,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl TwoFactorDuoContext {
|
||||||
|
pub async fn find_by_state(state: &str, conn: &mut DbConn) -> Option<Self> {
|
||||||
|
db_run! {
|
||||||
|
conn: {
|
||||||
|
twofactor_duo_ctx::table
|
||||||
|
.filter(twofactor_duo_ctx::state.eq(state))
|
||||||
|
.first::<TwoFactorDuoContextDb>(conn)
|
||||||
|
.ok()
|
||||||
|
.from_db()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub async fn save(state: &str, user_email: &str, nonce: &str, ttl: i64, conn: &mut DbConn) -> EmptyResult {
|
||||||
|
// A saved context should never be changed, only created or deleted.
|
||||||
|
let exists = Self::find_by_state(state, conn).await;
|
||||||
|
if exists.is_some() {
|
||||||
|
return Ok(());
|
||||||
|
};
|
||||||
|
|
||||||
|
let exp = Utc::now().timestamp() + ttl;
|
||||||
|
|
||||||
|
db_run! {
|
||||||
|
conn: {
|
||||||
|
diesel::insert_into(twofactor_duo_ctx::table)
|
||||||
|
.values((
|
||||||
|
twofactor_duo_ctx::state.eq(state),
|
||||||
|
twofactor_duo_ctx::user_email.eq(user_email),
|
||||||
|
twofactor_duo_ctx::nonce.eq(nonce),
|
||||||
|
twofactor_duo_ctx::exp.eq(exp)
|
||||||
|
))
|
||||||
|
.execute(conn)
|
||||||
|
.map_res("Error saving context to twofactor_duo_ctx")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub async fn find_expired(conn: &mut DbConn) -> Vec<Self> {
|
||||||
|
let now = Utc::now().timestamp();
|
||||||
|
db_run! {
|
||||||
|
conn: {
|
||||||
|
twofactor_duo_ctx::table
|
||||||
|
.filter(twofactor_duo_ctx::exp.lt(now))
|
||||||
|
.load::<TwoFactorDuoContextDb>(conn)
|
||||||
|
.expect("Error finding expired contexts in twofactor_duo_ctx")
|
||||||
|
.from_db()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub async fn delete(&self, conn: &mut DbConn) -> EmptyResult {
|
||||||
|
db_run! {
|
||||||
|
conn: {
|
||||||
|
diesel::delete(
|
||||||
|
twofactor_duo_ctx::table
|
||||||
|
.filter(twofactor_duo_ctx::state.eq(&self.state)))
|
||||||
|
.execute(conn)
|
||||||
|
.map_res("Error deleting from twofactor_duo_ctx")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub async fn purge_expired_duo_contexts(conn: &mut DbConn) {
|
||||||
|
for context in Self::find_expired(conn).await {
|
||||||
|
context.delete(conn).await.ok();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
@@ -13,6 +13,7 @@ db_object! {
|
|||||||
// must complete 2FA login before being added into the devices table.
|
// must complete 2FA login before being added into the devices table.
|
||||||
pub device_uuid: String,
|
pub device_uuid: String,
|
||||||
pub device_name: String,
|
pub device_name: String,
|
||||||
|
pub device_type: i32,
|
||||||
pub login_time: NaiveDateTime,
|
pub login_time: NaiveDateTime,
|
||||||
pub ip_address: String,
|
pub ip_address: String,
|
||||||
}
|
}
|
||||||
@@ -23,6 +24,7 @@ impl TwoFactorIncomplete {
|
|||||||
user_uuid: &str,
|
user_uuid: &str,
|
||||||
device_uuid: &str,
|
device_uuid: &str,
|
||||||
device_name: &str,
|
device_name: &str,
|
||||||
|
device_type: i32,
|
||||||
ip: &ClientIp,
|
ip: &ClientIp,
|
||||||
conn: &mut DbConn,
|
conn: &mut DbConn,
|
||||||
) -> EmptyResult {
|
) -> EmptyResult {
|
||||||
@@ -44,6 +46,7 @@ impl TwoFactorIncomplete {
|
|||||||
twofactor_incomplete::user_uuid.eq(user_uuid),
|
twofactor_incomplete::user_uuid.eq(user_uuid),
|
||||||
twofactor_incomplete::device_uuid.eq(device_uuid),
|
twofactor_incomplete::device_uuid.eq(device_uuid),
|
||||||
twofactor_incomplete::device_name.eq(device_name),
|
twofactor_incomplete::device_name.eq(device_name),
|
||||||
|
twofactor_incomplete::device_type.eq(device_type),
|
||||||
twofactor_incomplete::login_time.eq(Utc::now().naive_utc()),
|
twofactor_incomplete::login_time.eq(Utc::now().naive_utc()),
|
||||||
twofactor_incomplete::ip_address.eq(ip.ip.to_string()),
|
twofactor_incomplete::ip_address.eq(ip.ip.to_string()),
|
||||||
))
|
))
|
||||||
|
@@ -144,14 +144,14 @@ impl User {
|
|||||||
|
|
||||||
pub fn check_valid_recovery_code(&self, recovery_code: &str) -> bool {
|
pub fn check_valid_recovery_code(&self, recovery_code: &str) -> bool {
|
||||||
if let Some(ref totp_recover) = self.totp_recover {
|
if let Some(ref totp_recover) = self.totp_recover {
|
||||||
crate::crypto::ct_eq(recovery_code, totp_recover.to_lowercase())
|
crypto::ct_eq(recovery_code, totp_recover.to_lowercase())
|
||||||
} else {
|
} else {
|
||||||
false
|
false
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn check_valid_api_key(&self, key: &str) -> bool {
|
pub fn check_valid_api_key(&self, key: &str) -> bool {
|
||||||
matches!(self.api_key, Some(ref api_key) if crate::crypto::ct_eq(api_key, key))
|
matches!(self.api_key, Some(ref api_key) if crypto::ct_eq(api_key, key))
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Set the password hash generated
|
/// Set the password hash generated
|
||||||
|
@@ -169,11 +169,21 @@ table! {
|
|||||||
user_uuid -> Text,
|
user_uuid -> Text,
|
||||||
device_uuid -> Text,
|
device_uuid -> Text,
|
||||||
device_name -> Text,
|
device_name -> Text,
|
||||||
|
device_type -> Integer,
|
||||||
login_time -> Timestamp,
|
login_time -> Timestamp,
|
||||||
ip_address -> Text,
|
ip_address -> Text,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
table! {
|
||||||
|
twofactor_duo_ctx (state) {
|
||||||
|
state -> Text,
|
||||||
|
user_email -> Text,
|
||||||
|
nonce -> Text,
|
||||||
|
exp -> BigInt,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
table! {
|
table! {
|
||||||
users (uuid) {
|
users (uuid) {
|
||||||
uuid -> Text,
|
uuid -> Text,
|
||||||
|
@@ -169,11 +169,21 @@ table! {
|
|||||||
user_uuid -> Text,
|
user_uuid -> Text,
|
||||||
device_uuid -> Text,
|
device_uuid -> Text,
|
||||||
device_name -> Text,
|
device_name -> Text,
|
||||||
|
device_type -> Integer,
|
||||||
login_time -> Timestamp,
|
login_time -> Timestamp,
|
||||||
ip_address -> Text,
|
ip_address -> Text,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
table! {
|
||||||
|
twofactor_duo_ctx (state) {
|
||||||
|
state -> Text,
|
||||||
|
user_email -> Text,
|
||||||
|
nonce -> Text,
|
||||||
|
exp -> BigInt,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
table! {
|
table! {
|
||||||
users (uuid) {
|
users (uuid) {
|
||||||
uuid -> Text,
|
uuid -> Text,
|
||||||
|
@@ -169,11 +169,21 @@ table! {
|
|||||||
user_uuid -> Text,
|
user_uuid -> Text,
|
||||||
device_uuid -> Text,
|
device_uuid -> Text,
|
||||||
device_name -> Text,
|
device_name -> Text,
|
||||||
|
device_type -> Integer,
|
||||||
login_time -> Timestamp,
|
login_time -> Timestamp,
|
||||||
ip_address -> Text,
|
ip_address -> Text,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
table! {
|
||||||
|
twofactor_duo_ctx (state) {
|
||||||
|
state -> Text,
|
||||||
|
user_email -> Text,
|
||||||
|
nonce -> Text,
|
||||||
|
exp -> BigInt,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
table! {
|
table! {
|
||||||
users (uuid) {
|
users (uuid) {
|
||||||
uuid -> Text,
|
uuid -> Text,
|
||||||
|
@@ -2,6 +2,7 @@
|
|||||||
// Error generator macro
|
// Error generator macro
|
||||||
//
|
//
|
||||||
use crate::db::models::EventType;
|
use crate::db::models::EventType;
|
||||||
|
use crate::http_client::CustomHttpClientError;
|
||||||
use std::error::Error as StdError;
|
use std::error::Error as StdError;
|
||||||
|
|
||||||
macro_rules! make_error {
|
macro_rules! make_error {
|
||||||
@@ -68,6 +69,10 @@ make_error! {
|
|||||||
Empty(Empty): _no_source, _serialize,
|
Empty(Empty): _no_source, _serialize,
|
||||||
// Used to represent err! calls
|
// Used to represent err! calls
|
||||||
Simple(String): _no_source, _api_error,
|
Simple(String): _no_source, _api_error,
|
||||||
|
|
||||||
|
// Used in our custom http client to handle non-global IPs and blocked domains
|
||||||
|
CustomHttpClient(CustomHttpClientError): _has_source, _api_error,
|
||||||
|
|
||||||
// Used for special return values, like 2FA errors
|
// Used for special return values, like 2FA errors
|
||||||
Json(Value): _no_source, _serialize,
|
Json(Value): _no_source, _serialize,
|
||||||
Db(DieselErr): _has_source, _api_error,
|
Db(DieselErr): _has_source, _api_error,
|
||||||
@@ -204,7 +209,7 @@ use rocket::http::{ContentType, Status};
|
|||||||
use rocket::request::Request;
|
use rocket::request::Request;
|
||||||
use rocket::response::{self, Responder, Response};
|
use rocket::response::{self, Responder, Response};
|
||||||
|
|
||||||
impl<'r> Responder<'r, 'static> for Error {
|
impl Responder<'_, 'static> for Error {
|
||||||
fn respond_to(self, _: &Request<'_>) -> response::Result<'static> {
|
fn respond_to(self, _: &Request<'_>) -> response::Result<'static> {
|
||||||
match self.error {
|
match self.error {
|
||||||
ErrorKind::Empty(_) => {} // Don't print the error in this situation
|
ErrorKind::Empty(_) => {} // Don't print the error in this situation
|
||||||
|
246
src/http_client.rs
Normal file
246
src/http_client.rs
Normal file
@@ -0,0 +1,246 @@
|
|||||||
|
use std::{
|
||||||
|
fmt,
|
||||||
|
net::{IpAddr, SocketAddr},
|
||||||
|
str::FromStr,
|
||||||
|
sync::{Arc, Mutex},
|
||||||
|
time::Duration,
|
||||||
|
};
|
||||||
|
|
||||||
|
use hickory_resolver::{system_conf::read_system_conf, TokioAsyncResolver};
|
||||||
|
use once_cell::sync::Lazy;
|
||||||
|
use regex::Regex;
|
||||||
|
use reqwest::{
|
||||||
|
dns::{Name, Resolve, Resolving},
|
||||||
|
header, Client, ClientBuilder,
|
||||||
|
};
|
||||||
|
use url::Host;
|
||||||
|
|
||||||
|
use crate::{util::is_global, CONFIG};
|
||||||
|
|
||||||
|
pub fn make_http_request(method: reqwest::Method, url: &str) -> Result<reqwest::RequestBuilder, crate::Error> {
|
||||||
|
let Ok(url) = url::Url::parse(url) else {
|
||||||
|
err!("Invalid URL");
|
||||||
|
};
|
||||||
|
let Some(host) = url.host() else {
|
||||||
|
err!("Invalid host");
|
||||||
|
};
|
||||||
|
|
||||||
|
should_block_host(host)?;
|
||||||
|
|
||||||
|
static INSTANCE: Lazy<Client> = Lazy::new(|| get_reqwest_client_builder().build().expect("Failed to build client"));
|
||||||
|
|
||||||
|
Ok(INSTANCE.request(method, url))
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn get_reqwest_client_builder() -> ClientBuilder {
|
||||||
|
let mut headers = header::HeaderMap::new();
|
||||||
|
headers.insert(header::USER_AGENT, header::HeaderValue::from_static("Vaultwarden"));
|
||||||
|
|
||||||
|
let redirect_policy = reqwest::redirect::Policy::custom(|attempt| {
|
||||||
|
if attempt.previous().len() >= 5 {
|
||||||
|
return attempt.error("Too many redirects");
|
||||||
|
}
|
||||||
|
|
||||||
|
let Some(host) = attempt.url().host() else {
|
||||||
|
return attempt.error("Invalid host");
|
||||||
|
};
|
||||||
|
|
||||||
|
if let Err(e) = should_block_host(host) {
|
||||||
|
return attempt.error(e);
|
||||||
|
}
|
||||||
|
|
||||||
|
attempt.follow()
|
||||||
|
});
|
||||||
|
|
||||||
|
Client::builder()
|
||||||
|
.default_headers(headers)
|
||||||
|
.redirect(redirect_policy)
|
||||||
|
.dns_resolver(CustomDnsResolver::instance())
|
||||||
|
.timeout(Duration::from_secs(10))
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn should_block_address(domain_or_ip: &str) -> bool {
|
||||||
|
if let Ok(ip) = IpAddr::from_str(domain_or_ip) {
|
||||||
|
if should_block_ip(ip) {
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
should_block_address_regex(domain_or_ip)
|
||||||
|
}
|
||||||
|
|
||||||
|
fn should_block_ip(ip: IpAddr) -> bool {
|
||||||
|
if !CONFIG.http_request_block_non_global_ips() {
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
!is_global(ip)
|
||||||
|
}
|
||||||
|
|
||||||
|
fn should_block_address_regex(domain_or_ip: &str) -> bool {
|
||||||
|
let Some(block_regex) = CONFIG.http_request_block_regex() else {
|
||||||
|
return false;
|
||||||
|
};
|
||||||
|
|
||||||
|
static COMPILED_REGEX: Mutex<Option<(String, Regex)>> = Mutex::new(None);
|
||||||
|
let mut guard = COMPILED_REGEX.lock().unwrap();
|
||||||
|
|
||||||
|
// If the stored regex is up to date, use it
|
||||||
|
if let Some((value, regex)) = &*guard {
|
||||||
|
if value == &block_regex {
|
||||||
|
return regex.is_match(domain_or_ip);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// If we don't have a regex stored, or it's not up to date, recreate it
|
||||||
|
let regex = Regex::new(&block_regex).unwrap();
|
||||||
|
let is_match = regex.is_match(domain_or_ip);
|
||||||
|
*guard = Some((block_regex, regex));
|
||||||
|
|
||||||
|
is_match
|
||||||
|
}
|
||||||
|
|
||||||
|
fn should_block_host(host: Host<&str>) -> Result<(), CustomHttpClientError> {
|
||||||
|
let (ip, host_str): (Option<IpAddr>, String) = match host {
|
||||||
|
Host::Ipv4(ip) => (Some(ip.into()), ip.to_string()),
|
||||||
|
Host::Ipv6(ip) => (Some(ip.into()), ip.to_string()),
|
||||||
|
Host::Domain(d) => (None, d.to_string()),
|
||||||
|
};
|
||||||
|
|
||||||
|
if let Some(ip) = ip {
|
||||||
|
if should_block_ip(ip) {
|
||||||
|
return Err(CustomHttpClientError::NonGlobalIp {
|
||||||
|
domain: None,
|
||||||
|
ip,
|
||||||
|
});
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if should_block_address_regex(&host_str) {
|
||||||
|
return Err(CustomHttpClientError::Blocked {
|
||||||
|
domain: host_str,
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Debug, Clone)]
|
||||||
|
pub enum CustomHttpClientError {
|
||||||
|
Blocked {
|
||||||
|
domain: String,
|
||||||
|
},
|
||||||
|
NonGlobalIp {
|
||||||
|
domain: Option<String>,
|
||||||
|
ip: IpAddr,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
impl CustomHttpClientError {
|
||||||
|
pub fn downcast_ref(e: &dyn std::error::Error) -> Option<&Self> {
|
||||||
|
let mut source = e.source();
|
||||||
|
|
||||||
|
while let Some(err) = source {
|
||||||
|
source = err.source();
|
||||||
|
if let Some(err) = err.downcast_ref::<CustomHttpClientError>() {
|
||||||
|
return Some(err);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
None
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl fmt::Display for CustomHttpClientError {
|
||||||
|
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||||
|
match self {
|
||||||
|
Self::Blocked {
|
||||||
|
domain,
|
||||||
|
} => write!(f, "Blocked domain: {domain} matched HTTP_REQUEST_BLOCK_REGEX"),
|
||||||
|
Self::NonGlobalIp {
|
||||||
|
domain: Some(domain),
|
||||||
|
ip,
|
||||||
|
} => write!(f, "IP {ip} for domain '{domain}' is not a global IP!"),
|
||||||
|
Self::NonGlobalIp {
|
||||||
|
domain: None,
|
||||||
|
ip,
|
||||||
|
} => write!(f, "IP {ip} is not a global IP!"),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl std::error::Error for CustomHttpClientError {}
|
||||||
|
|
||||||
|
#[derive(Debug, Clone)]
|
||||||
|
enum CustomDnsResolver {
|
||||||
|
Default(),
|
||||||
|
Hickory(Arc<TokioAsyncResolver>),
|
||||||
|
}
|
||||||
|
type BoxError = Box<dyn std::error::Error + Send + Sync>;
|
||||||
|
|
||||||
|
impl CustomDnsResolver {
|
||||||
|
fn instance() -> Arc<Self> {
|
||||||
|
static INSTANCE: Lazy<Arc<CustomDnsResolver>> = Lazy::new(CustomDnsResolver::new);
|
||||||
|
Arc::clone(&*INSTANCE)
|
||||||
|
}
|
||||||
|
|
||||||
|
fn new() -> Arc<Self> {
|
||||||
|
match read_system_conf() {
|
||||||
|
Ok((config, opts)) => {
|
||||||
|
let resolver = TokioAsyncResolver::tokio(config.clone(), opts.clone());
|
||||||
|
Arc::new(Self::Hickory(Arc::new(resolver)))
|
||||||
|
}
|
||||||
|
Err(e) => {
|
||||||
|
warn!("Error creating Hickory resolver, falling back to default: {e:?}");
|
||||||
|
Arc::new(Self::Default())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Note that we get an iterator of addresses, but we only grab the first one for convenience
|
||||||
|
async fn resolve_domain(&self, name: &str) -> Result<Option<SocketAddr>, BoxError> {
|
||||||
|
pre_resolve(name)?;
|
||||||
|
|
||||||
|
let result = match self {
|
||||||
|
Self::Default() => tokio::net::lookup_host(name).await?.next(),
|
||||||
|
Self::Hickory(r) => r.lookup_ip(name).await?.iter().next().map(|a| SocketAddr::new(a, 0)),
|
||||||
|
};
|
||||||
|
|
||||||
|
if let Some(addr) = &result {
|
||||||
|
post_resolve(name, addr.ip())?;
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(result)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn pre_resolve(name: &str) -> Result<(), CustomHttpClientError> {
|
||||||
|
if should_block_address(name) {
|
||||||
|
return Err(CustomHttpClientError::Blocked {
|
||||||
|
domain: name.to_string(),
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
fn post_resolve(name: &str, ip: IpAddr) -> Result<(), CustomHttpClientError> {
|
||||||
|
if should_block_ip(ip) {
|
||||||
|
Err(CustomHttpClientError::NonGlobalIp {
|
||||||
|
domain: Some(name.to_string()),
|
||||||
|
ip,
|
||||||
|
})
|
||||||
|
} else {
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Resolve for CustomDnsResolver {
|
||||||
|
fn resolve(&self, name: Name) -> Resolving {
|
||||||
|
let this = self.clone();
|
||||||
|
Box::pin(async move {
|
||||||
|
let name = name.as_str();
|
||||||
|
let result = this.resolve_domain(name).await?;
|
||||||
|
Ok::<reqwest::dns::Addrs, _>(Box::new(result.into_iter()))
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
55
src/mail.rs
55
src/mail.rs
@@ -17,6 +17,7 @@ use crate::{
|
|||||||
encode_jwt, generate_delete_claims, generate_emergency_access_invite_claims, generate_invite_claims,
|
encode_jwt, generate_delete_claims, generate_emergency_access_invite_claims, generate_invite_claims,
|
||||||
generate_verify_email_claims,
|
generate_verify_email_claims,
|
||||||
},
|
},
|
||||||
|
db::models::{Device, DeviceType, User},
|
||||||
error::Error,
|
error::Error,
|
||||||
CONFIG,
|
CONFIG,
|
||||||
};
|
};
|
||||||
@@ -229,37 +230,51 @@ pub async fn send_single_org_removed_from_org(address: &str, org_name: &str) ->
|
|||||||
}
|
}
|
||||||
|
|
||||||
pub async fn send_invite(
|
pub async fn send_invite(
|
||||||
address: &str,
|
user: &User,
|
||||||
uuid: &str,
|
|
||||||
org_id: Option<String>,
|
org_id: Option<String>,
|
||||||
org_user_id: Option<String>,
|
org_user_id: Option<String>,
|
||||||
org_name: &str,
|
org_name: &str,
|
||||||
invited_by_email: Option<String>,
|
invited_by_email: Option<String>,
|
||||||
) -> EmptyResult {
|
) -> EmptyResult {
|
||||||
let claims = generate_invite_claims(
|
let claims = generate_invite_claims(
|
||||||
uuid.to_string(),
|
user.uuid.clone(),
|
||||||
String::from(address),
|
user.email.clone(),
|
||||||
org_id.clone(),
|
org_id.clone(),
|
||||||
org_user_id.clone(),
|
org_user_id.clone(),
|
||||||
invited_by_email,
|
invited_by_email,
|
||||||
);
|
);
|
||||||
let invite_token = encode_jwt(&claims);
|
let invite_token = encode_jwt(&claims);
|
||||||
|
let mut query = url::Url::parse("https://query.builder").unwrap();
|
||||||
|
{
|
||||||
|
let mut query_params = query.query_pairs_mut();
|
||||||
|
query_params
|
||||||
|
.append_pair("email", &user.email)
|
||||||
|
.append_pair("organizationName", org_name)
|
||||||
|
.append_pair("organizationId", org_id.as_deref().unwrap_or("_"))
|
||||||
|
.append_pair("organizationUserId", org_user_id.as_deref().unwrap_or("_"))
|
||||||
|
.append_pair("token", &invite_token);
|
||||||
|
if user.private_key.is_some() {
|
||||||
|
query_params.append_pair("orgUserHasExistingUser", "true");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
let query_string = match query.query() {
|
||||||
|
None => err!(format!("Failed to build invite URL query parameters")),
|
||||||
|
Some(query) => query,
|
||||||
|
};
|
||||||
|
|
||||||
|
// `url.Url` would place the anchor `#` after the query parameters
|
||||||
|
let url = format!("{}/#/accept-organization/?{}", CONFIG.domain(), query_string);
|
||||||
let (subject, body_html, body_text) = get_text(
|
let (subject, body_html, body_text) = get_text(
|
||||||
"email/send_org_invite",
|
"email/send_org_invite",
|
||||||
json!({
|
json!({
|
||||||
"url": CONFIG.domain(),
|
"url": url,
|
||||||
"img_src": CONFIG._smtp_img_src(),
|
"img_src": CONFIG._smtp_img_src(),
|
||||||
"org_id": org_id.as_deref().unwrap_or("_"),
|
|
||||||
"org_user_id": org_user_id.as_deref().unwrap_or("_"),
|
|
||||||
"email": percent_encode(address.as_bytes(), NON_ALPHANUMERIC).to_string(),
|
|
||||||
"org_name_encoded": percent_encode(org_name.as_bytes(), NON_ALPHANUMERIC).to_string(),
|
|
||||||
"org_name": org_name,
|
"org_name": org_name,
|
||||||
"token": invite_token,
|
|
||||||
}),
|
}),
|
||||||
)?;
|
)?;
|
||||||
|
|
||||||
send_email(address, &subject, body_html, body_text).await
|
send_email(&user.email, &subject, body_html, body_text).await
|
||||||
}
|
}
|
||||||
|
|
||||||
pub async fn send_emergency_access_invite(
|
pub async fn send_emergency_access_invite(
|
||||||
@@ -427,9 +442,8 @@ pub async fn send_invite_confirmed(address: &str, org_name: &str) -> EmptyResult
|
|||||||
send_email(address, &subject, body_html, body_text).await
|
send_email(address, &subject, body_html, body_text).await
|
||||||
}
|
}
|
||||||
|
|
||||||
pub async fn send_new_device_logged_in(address: &str, ip: &str, dt: &NaiveDateTime, device: &str) -> EmptyResult {
|
pub async fn send_new_device_logged_in(address: &str, ip: &str, dt: &NaiveDateTime, device: &Device) -> EmptyResult {
|
||||||
use crate::util::upcase_first;
|
use crate::util::upcase_first;
|
||||||
let device = upcase_first(device);
|
|
||||||
|
|
||||||
let fmt = "%A, %B %_d, %Y at %r %Z";
|
let fmt = "%A, %B %_d, %Y at %r %Z";
|
||||||
let (subject, body_html, body_text) = get_text(
|
let (subject, body_html, body_text) = get_text(
|
||||||
@@ -438,7 +452,8 @@ pub async fn send_new_device_logged_in(address: &str, ip: &str, dt: &NaiveDateTi
|
|||||||
"url": CONFIG.domain(),
|
"url": CONFIG.domain(),
|
||||||
"img_src": CONFIG._smtp_img_src(),
|
"img_src": CONFIG._smtp_img_src(),
|
||||||
"ip": ip,
|
"ip": ip,
|
||||||
"device": device,
|
"device_name": upcase_first(&device.name),
|
||||||
|
"device_type": DeviceType::from_i32(device.atype).to_string(),
|
||||||
"datetime": crate::util::format_naive_datetime_local(dt, fmt),
|
"datetime": crate::util::format_naive_datetime_local(dt, fmt),
|
||||||
}),
|
}),
|
||||||
)?;
|
)?;
|
||||||
@@ -446,9 +461,14 @@ pub async fn send_new_device_logged_in(address: &str, ip: &str, dt: &NaiveDateTi
|
|||||||
send_email(address, &subject, body_html, body_text).await
|
send_email(address, &subject, body_html, body_text).await
|
||||||
}
|
}
|
||||||
|
|
||||||
pub async fn send_incomplete_2fa_login(address: &str, ip: &str, dt: &NaiveDateTime, device: &str) -> EmptyResult {
|
pub async fn send_incomplete_2fa_login(
|
||||||
|
address: &str,
|
||||||
|
ip: &str,
|
||||||
|
dt: &NaiveDateTime,
|
||||||
|
device_name: &str,
|
||||||
|
device_type: &str,
|
||||||
|
) -> EmptyResult {
|
||||||
use crate::util::upcase_first;
|
use crate::util::upcase_first;
|
||||||
let device = upcase_first(device);
|
|
||||||
|
|
||||||
let fmt = "%A, %B %_d, %Y at %r %Z";
|
let fmt = "%A, %B %_d, %Y at %r %Z";
|
||||||
let (subject, body_html, body_text) = get_text(
|
let (subject, body_html, body_text) = get_text(
|
||||||
@@ -457,7 +477,8 @@ pub async fn send_incomplete_2fa_login(address: &str, ip: &str, dt: &NaiveDateTi
|
|||||||
"url": CONFIG.domain(),
|
"url": CONFIG.domain(),
|
||||||
"img_src": CONFIG._smtp_img_src(),
|
"img_src": CONFIG._smtp_img_src(),
|
||||||
"ip": ip,
|
"ip": ip,
|
||||||
"device": device,
|
"device_name": upcase_first(device_name),
|
||||||
|
"device_type": device_type,
|
||||||
"datetime": crate::util::format_naive_datetime_local(dt, fmt),
|
"datetime": crate::util::format_naive_datetime_local(dt, fmt),
|
||||||
"time_limit": CONFIG.incomplete_2fa_time_limit(),
|
"time_limit": CONFIG.incomplete_2fa_time_limit(),
|
||||||
}),
|
}),
|
||||||
|
221
src/main.rs
221
src/main.rs
@@ -26,6 +26,7 @@ extern crate diesel;
|
|||||||
extern crate diesel_migrations;
|
extern crate diesel_migrations;
|
||||||
|
|
||||||
use std::{
|
use std::{
|
||||||
|
collections::HashMap,
|
||||||
fs::{canonicalize, create_dir_all},
|
fs::{canonicalize, create_dir_all},
|
||||||
panic,
|
panic,
|
||||||
path::Path,
|
path::Path,
|
||||||
@@ -37,6 +38,7 @@ use std::{
|
|||||||
use tokio::{
|
use tokio::{
|
||||||
fs::File,
|
fs::File,
|
||||||
io::{AsyncBufReadExt, BufReader},
|
io::{AsyncBufReadExt, BufReader},
|
||||||
|
signal::unix::SignalKind,
|
||||||
};
|
};
|
||||||
|
|
||||||
#[macro_use]
|
#[macro_use]
|
||||||
@@ -47,10 +49,12 @@ mod config;
|
|||||||
mod crypto;
|
mod crypto;
|
||||||
#[macro_use]
|
#[macro_use]
|
||||||
mod db;
|
mod db;
|
||||||
|
mod http_client;
|
||||||
mod mail;
|
mod mail;
|
||||||
mod ratelimit;
|
mod ratelimit;
|
||||||
mod util;
|
mod util;
|
||||||
|
|
||||||
|
use crate::api::core::two_factor::duo_oidc::purge_duo_contexts;
|
||||||
use crate::api::purge_auth_requests;
|
use crate::api::purge_auth_requests;
|
||||||
use crate::api::{WS_ANONYMOUS_SUBSCRIPTIONS, WS_USERS};
|
use crate::api::{WS_ANONYMOUS_SUBSCRIPTIONS, WS_USERS};
|
||||||
pub use config::CONFIG;
|
pub use config::CONFIG;
|
||||||
@@ -64,19 +68,11 @@ async fn main() -> Result<(), Error> {
|
|||||||
parse_args();
|
parse_args();
|
||||||
launch_info();
|
launch_info();
|
||||||
|
|
||||||
use log::LevelFilter as LF;
|
let level = init_logging()?;
|
||||||
let level = LF::from_str(&CONFIG.log_level()).unwrap_or_else(|_| {
|
|
||||||
let valid_log_levels = LF::iter().map(|lvl| lvl.as_str().to_lowercase()).collect::<Vec<String>>().join(", ");
|
|
||||||
println!("Log level must be one of the following: {valid_log_levels}");
|
|
||||||
exit(1);
|
|
||||||
});
|
|
||||||
init_logging(level).ok();
|
|
||||||
|
|
||||||
let extra_debug = matches!(level, LF::Trace | LF::Debug);
|
|
||||||
|
|
||||||
check_data_folder().await;
|
check_data_folder().await;
|
||||||
auth::initialize_keys().unwrap_or_else(|_| {
|
auth::initialize_keys().unwrap_or_else(|e| {
|
||||||
error!("Error creating keys, exiting...");
|
error!("Error creating private key '{}'\n{e:?}\nExiting Vaultwarden!", CONFIG.private_rsa_key());
|
||||||
exit(1);
|
exit(1);
|
||||||
});
|
});
|
||||||
check_web_vault();
|
check_web_vault();
|
||||||
@@ -88,8 +84,9 @@ async fn main() -> Result<(), Error> {
|
|||||||
|
|
||||||
let pool = create_db_pool().await;
|
let pool = create_db_pool().await;
|
||||||
schedule_jobs(pool.clone());
|
schedule_jobs(pool.clone());
|
||||||
crate::db::models::TwoFactor::migrate_u2f_to_webauthn(&mut pool.get().await.unwrap()).await.unwrap();
|
db::models::TwoFactor::migrate_u2f_to_webauthn(&mut pool.get().await.unwrap()).await.unwrap();
|
||||||
|
|
||||||
|
let extra_debug = matches!(level, log::LevelFilter::Trace | log::LevelFilter::Debug);
|
||||||
launch_rocket(pool, extra_debug).await // Blocks until program termination.
|
launch_rocket(pool, extra_debug).await // Blocks until program termination.
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -101,10 +98,12 @@ USAGE:
|
|||||||
|
|
||||||
FLAGS:
|
FLAGS:
|
||||||
-h, --help Prints help information
|
-h, --help Prints help information
|
||||||
-v, --version Prints the app version
|
-v, --version Prints the app and web-vault version
|
||||||
|
|
||||||
COMMAND:
|
COMMAND:
|
||||||
hash [--preset {bitwarden|owasp}] Generate an Argon2id PHC ADMIN_TOKEN
|
hash [--preset {bitwarden|owasp}] Generate an Argon2id PHC ADMIN_TOKEN
|
||||||
|
backup Create a backup of the SQLite database
|
||||||
|
You can also send the USR1 signal to trigger a backup
|
||||||
|
|
||||||
PRESETS: m= t= p=
|
PRESETS: m= t= p=
|
||||||
bitwarden (default) 64MiB, 3 Iterations, 4 Threads
|
bitwarden (default) 64MiB, 3 Iterations, 4 Threads
|
||||||
@@ -119,11 +118,13 @@ fn parse_args() {
|
|||||||
let version = VERSION.unwrap_or("(Version info from Git not present)");
|
let version = VERSION.unwrap_or("(Version info from Git not present)");
|
||||||
|
|
||||||
if pargs.contains(["-h", "--help"]) {
|
if pargs.contains(["-h", "--help"]) {
|
||||||
println!("vaultwarden {version}");
|
println!("Vaultwarden {version}");
|
||||||
print!("{HELP}");
|
print!("{HELP}");
|
||||||
exit(0);
|
exit(0);
|
||||||
} else if pargs.contains(["-v", "--version"]) {
|
} else if pargs.contains(["-v", "--version"]) {
|
||||||
println!("vaultwarden {version}");
|
let web_vault_version = util::get_web_vault_version();
|
||||||
|
println!("Vaultwarden {version}");
|
||||||
|
println!("Web-Vault {web_vault_version}");
|
||||||
exit(0);
|
exit(0);
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -167,7 +168,7 @@ fn parse_args() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
let argon2 = Argon2::new(Argon2id, V0x13, argon2_params.build().unwrap());
|
let argon2 = Argon2::new(Argon2id, V0x13, argon2_params.build().unwrap());
|
||||||
let salt = SaltString::encode_b64(&crate::crypto::get_random_bytes::<32>()).unwrap();
|
let salt = SaltString::encode_b64(&crypto::get_random_bytes::<32>()).unwrap();
|
||||||
|
|
||||||
let argon2_timer = tokio::time::Instant::now();
|
let argon2_timer = tokio::time::Instant::now();
|
||||||
if let Ok(password_hash) = argon2.hash_password(password.as_bytes(), &salt) {
|
if let Ok(password_hash) = argon2.hash_password(password.as_bytes(), &salt) {
|
||||||
@@ -178,13 +179,47 @@ fn parse_args() {
|
|||||||
argon2_timer.elapsed()
|
argon2_timer.elapsed()
|
||||||
);
|
);
|
||||||
} else {
|
} else {
|
||||||
error!("Unable to generate Argon2id PHC hash.");
|
println!("Unable to generate Argon2id PHC hash.");
|
||||||
exit(1);
|
exit(1);
|
||||||
}
|
}
|
||||||
|
} else if command == "backup" {
|
||||||
|
match backup_sqlite() {
|
||||||
|
Ok(f) => {
|
||||||
|
println!("Backup to '{f}' was successful");
|
||||||
|
exit(0);
|
||||||
|
}
|
||||||
|
Err(e) => {
|
||||||
|
println!("Backup failed. {e:?}");
|
||||||
|
exit(1);
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
exit(0);
|
exit(0);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
fn backup_sqlite() -> Result<String, Error> {
|
||||||
|
#[cfg(sqlite)]
|
||||||
|
{
|
||||||
|
use crate::db::{backup_sqlite_database, DbConnType};
|
||||||
|
if DbConnType::from_url(&CONFIG.database_url()).map(|t| t == DbConnType::sqlite).unwrap_or(false) {
|
||||||
|
use diesel::Connection;
|
||||||
|
let url = CONFIG.database_url();
|
||||||
|
|
||||||
|
// Establish a connection to the sqlite database
|
||||||
|
let mut conn = diesel::sqlite::SqliteConnection::establish(&url)?;
|
||||||
|
let backup_file = backup_sqlite_database(&mut conn)?;
|
||||||
|
Ok(backup_file)
|
||||||
|
} else {
|
||||||
|
err_silent!("The database type is not SQLite. Backups only works for SQLite databases")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
#[cfg(not(sqlite))]
|
||||||
|
{
|
||||||
|
err_silent!("The 'sqlite' feature is not enabled. Backups only works for SQLite databases")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
fn launch_info() {
|
fn launch_info() {
|
||||||
println!(
|
println!(
|
||||||
"\
|
"\
|
||||||
@@ -210,7 +245,38 @@ fn launch_info() {
|
|||||||
);
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
fn init_logging(level: log::LevelFilter) -> Result<(), fern::InitError> {
|
fn init_logging() -> Result<log::LevelFilter, Error> {
|
||||||
|
let levels = log::LevelFilter::iter().map(|lvl| lvl.as_str().to_lowercase()).collect::<Vec<String>>().join("|");
|
||||||
|
let log_level_rgx_str = format!("^({levels})((,[^,=]+=({levels}))*)$");
|
||||||
|
let log_level_rgx = regex::Regex::new(&log_level_rgx_str)?;
|
||||||
|
let config_str = CONFIG.log_level().to_lowercase();
|
||||||
|
|
||||||
|
let (level, levels_override) = if let Some(caps) = log_level_rgx.captures(&config_str) {
|
||||||
|
let level = caps
|
||||||
|
.get(1)
|
||||||
|
.and_then(|m| log::LevelFilter::from_str(m.as_str()).ok())
|
||||||
|
.ok_or(Error::new("Failed to parse global log level".to_string(), ""))?;
|
||||||
|
|
||||||
|
let levels_override: Vec<(&str, log::LevelFilter)> = caps
|
||||||
|
.get(2)
|
||||||
|
.map(|m| {
|
||||||
|
m.as_str()
|
||||||
|
.split(',')
|
||||||
|
.collect::<Vec<&str>>()
|
||||||
|
.into_iter()
|
||||||
|
.flat_map(|s| match s.split('=').collect::<Vec<&str>>()[..] {
|
||||||
|
[log, lvl_str] => log::LevelFilter::from_str(lvl_str).ok().map(|lvl| (log, lvl)),
|
||||||
|
_ => None,
|
||||||
|
})
|
||||||
|
.collect()
|
||||||
|
})
|
||||||
|
.ok_or(Error::new("Failed to parse overrides".to_string(), ""))?;
|
||||||
|
|
||||||
|
(level, levels_override)
|
||||||
|
} else {
|
||||||
|
err!(format!("LOG_LEVEL should follow the format info,vaultwarden::api::icons=debug, invalid: {config_str}"))
|
||||||
|
};
|
||||||
|
|
||||||
// Depending on the main log level we either want to disable or enable logging for hickory.
|
// Depending on the main log level we either want to disable or enable logging for hickory.
|
||||||
// Else if there are timeouts it will clutter the logs since hickory uses warn for this.
|
// Else if there are timeouts it will clutter the logs since hickory uses warn for this.
|
||||||
let hickory_level = if level >= log::LevelFilter::Debug {
|
let hickory_level = if level >= log::LevelFilter::Debug {
|
||||||
@@ -241,47 +307,61 @@ fn init_logging(level: log::LevelFilter) -> Result<(), fern::InitError> {
|
|||||||
log::LevelFilter::Warn
|
log::LevelFilter::Warn
|
||||||
};
|
};
|
||||||
|
|
||||||
let mut logger = fern::Dispatch::new()
|
|
||||||
.level(level)
|
|
||||||
// Hide unknown certificate errors if using self-signed
|
|
||||||
.level_for("rustls::session", log::LevelFilter::Off)
|
|
||||||
// Hide failed to close stream messages
|
|
||||||
.level_for("hyper::server", log::LevelFilter::Warn)
|
|
||||||
// Silence Rocket `_` logs
|
|
||||||
.level_for("_", rocket_underscore_level)
|
|
||||||
.level_for("rocket::response::responder::_", rocket_underscore_level)
|
|
||||||
.level_for("rocket::server::_", rocket_underscore_level)
|
|
||||||
.level_for("vaultwarden::api::admin::_", rocket_underscore_level)
|
|
||||||
.level_for("vaultwarden::api::notifications::_", rocket_underscore_level)
|
|
||||||
// Silence Rocket logs
|
|
||||||
.level_for("rocket::launch", log::LevelFilter::Error)
|
|
||||||
.level_for("rocket::launch_", log::LevelFilter::Error)
|
|
||||||
.level_for("rocket::rocket", log::LevelFilter::Warn)
|
|
||||||
.level_for("rocket::server", log::LevelFilter::Warn)
|
|
||||||
.level_for("rocket::fairing::fairings", log::LevelFilter::Warn)
|
|
||||||
.level_for("rocket::shield::shield", log::LevelFilter::Warn)
|
|
||||||
.level_for("hyper::proto", log::LevelFilter::Off)
|
|
||||||
.level_for("hyper::client", log::LevelFilter::Off)
|
|
||||||
// Filter handlebars logs
|
|
||||||
.level_for("handlebars::render", handlebars_level)
|
|
||||||
// Prevent cookie_store logs
|
|
||||||
.level_for("cookie_store", log::LevelFilter::Off)
|
|
||||||
// Variable level for hickory used by reqwest
|
|
||||||
.level_for("hickory_resolver::name_server::name_server", hickory_level)
|
|
||||||
.level_for("hickory_proto::xfer", hickory_level)
|
|
||||||
.level_for("diesel_logger", diesel_logger_level)
|
|
||||||
.chain(std::io::stdout());
|
|
||||||
|
|
||||||
// Enable smtp debug logging only specifically for smtp when need.
|
// Enable smtp debug logging only specifically for smtp when need.
|
||||||
// This can contain sensitive information we do not want in the default debug/trace logging.
|
// This can contain sensitive information we do not want in the default debug/trace logging.
|
||||||
if CONFIG.smtp_debug() {
|
let smtp_log_level = if CONFIG.smtp_debug() {
|
||||||
|
log::LevelFilter::Debug
|
||||||
|
} else {
|
||||||
|
log::LevelFilter::Off
|
||||||
|
};
|
||||||
|
|
||||||
|
let mut default_levels = HashMap::from([
|
||||||
|
// Hide unknown certificate errors if using self-signed
|
||||||
|
("rustls::session", log::LevelFilter::Off),
|
||||||
|
// Hide failed to close stream messages
|
||||||
|
("hyper::server", log::LevelFilter::Warn),
|
||||||
|
// Silence Rocket `_` logs
|
||||||
|
("_", rocket_underscore_level),
|
||||||
|
("rocket::response::responder::_", rocket_underscore_level),
|
||||||
|
("rocket::server::_", rocket_underscore_level),
|
||||||
|
("vaultwarden::api::admin::_", rocket_underscore_level),
|
||||||
|
("vaultwarden::api::notifications::_", rocket_underscore_level),
|
||||||
|
// Silence Rocket logs
|
||||||
|
("rocket::launch", log::LevelFilter::Error),
|
||||||
|
("rocket::launch_", log::LevelFilter::Error),
|
||||||
|
("rocket::rocket", log::LevelFilter::Warn),
|
||||||
|
("rocket::server", log::LevelFilter::Warn),
|
||||||
|
("rocket::fairing::fairings", log::LevelFilter::Warn),
|
||||||
|
("rocket::shield::shield", log::LevelFilter::Warn),
|
||||||
|
("hyper::proto", log::LevelFilter::Off),
|
||||||
|
("hyper::client", log::LevelFilter::Off),
|
||||||
|
// Filter handlebars logs
|
||||||
|
("handlebars::render", handlebars_level),
|
||||||
|
// Prevent cookie_store logs
|
||||||
|
("cookie_store", log::LevelFilter::Off),
|
||||||
|
// Variable level for hickory used by reqwest
|
||||||
|
("hickory_resolver::name_server::name_server", hickory_level),
|
||||||
|
("hickory_proto::xfer", hickory_level),
|
||||||
|
("diesel_logger", diesel_logger_level),
|
||||||
|
// SMTP
|
||||||
|
("lettre::transport::smtp", smtp_log_level),
|
||||||
|
]);
|
||||||
|
|
||||||
|
for (path, level) in levels_override.into_iter() {
|
||||||
|
let _ = default_levels.insert(path, level);
|
||||||
|
}
|
||||||
|
|
||||||
|
if Some(&log::LevelFilter::Debug) == default_levels.get("lettre::transport::smtp") {
|
||||||
println!(
|
println!(
|
||||||
"[WARNING] SMTP Debugging is enabled (SMTP_DEBUG=true). Sensitive information could be disclosed via logs!\n\
|
"[WARNING] SMTP Debugging is enabled (SMTP_DEBUG=true). Sensitive information could be disclosed via logs!\n\
|
||||||
[WARNING] Only enable SMTP_DEBUG during troubleshooting!\n"
|
[WARNING] Only enable SMTP_DEBUG during troubleshooting!\n"
|
||||||
);
|
);
|
||||||
logger = logger.level_for("lettre::transport::smtp", log::LevelFilter::Debug)
|
}
|
||||||
} else {
|
|
||||||
logger = logger.level_for("lettre::transport::smtp", log::LevelFilter::Off)
|
let mut logger = fern::Dispatch::new().level(level).chain(std::io::stdout());
|
||||||
|
|
||||||
|
for (path, level) in default_levels {
|
||||||
|
logger = logger.level_for(path.to_string(), level);
|
||||||
}
|
}
|
||||||
|
|
||||||
if CONFIG.extended_logging() {
|
if CONFIG.extended_logging() {
|
||||||
@@ -305,7 +385,7 @@ fn init_logging(level: log::LevelFilter) -> Result<(), fern::InitError> {
|
|||||||
}
|
}
|
||||||
#[cfg(not(windows))]
|
#[cfg(not(windows))]
|
||||||
{
|
{
|
||||||
const SIGHUP: i32 = tokio::signal::unix::SignalKind::hangup().as_raw_value();
|
const SIGHUP: i32 = SignalKind::hangup().as_raw_value();
|
||||||
let path = Path::new(&log_file);
|
let path = Path::new(&log_file);
|
||||||
logger = logger.chain(fern::log_reopen1(path, [SIGHUP])?);
|
logger = logger.chain(fern::log_reopen1(path, [SIGHUP])?);
|
||||||
}
|
}
|
||||||
@@ -318,7 +398,9 @@ fn init_logging(level: log::LevelFilter) -> Result<(), fern::InitError> {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
logger.apply()?;
|
if let Err(err) = logger.apply() {
|
||||||
|
err!(format!("Failed to activate logger: {err}"))
|
||||||
|
}
|
||||||
|
|
||||||
// Catch panics and log them instead of default output to StdErr
|
// Catch panics and log them instead of default output to StdErr
|
||||||
panic::set_hook(Box::new(|info| {
|
panic::set_hook(Box::new(|info| {
|
||||||
@@ -356,7 +438,7 @@ fn init_logging(level: log::LevelFilter) -> Result<(), fern::InitError> {
|
|||||||
}
|
}
|
||||||
}));
|
}));
|
||||||
|
|
||||||
Ok(())
|
Ok(level)
|
||||||
}
|
}
|
||||||
|
|
||||||
#[cfg(not(windows))]
|
#[cfg(not(windows))]
|
||||||
@@ -513,11 +595,27 @@ async fn launch_rocket(pool: db::DbPool, extra_debug: bool) -> Result<(), Error>
|
|||||||
|
|
||||||
tokio::spawn(async move {
|
tokio::spawn(async move {
|
||||||
tokio::signal::ctrl_c().await.expect("Error setting Ctrl-C handler");
|
tokio::signal::ctrl_c().await.expect("Error setting Ctrl-C handler");
|
||||||
info!("Exiting vaultwarden!");
|
info!("Exiting Vaultwarden!");
|
||||||
CONFIG.shutdown();
|
CONFIG.shutdown();
|
||||||
});
|
});
|
||||||
|
|
||||||
let _ = instance.launch().await?;
|
#[cfg(unix)]
|
||||||
|
{
|
||||||
|
tokio::spawn(async move {
|
||||||
|
let mut signal_user1 = tokio::signal::unix::signal(SignalKind::user_defined1()).unwrap();
|
||||||
|
loop {
|
||||||
|
// If we need more signals to act upon, we might want to use select! here.
|
||||||
|
// With only one item to listen for this is enough.
|
||||||
|
let _ = signal_user1.recv().await;
|
||||||
|
match backup_sqlite() {
|
||||||
|
Ok(f) => info!("Backup to '{f}' was successful"),
|
||||||
|
Err(e) => error!("Backup failed. {e:?}"),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
instance.launch().await?;
|
||||||
|
|
||||||
info!("Vaultwarden process exited!");
|
info!("Vaultwarden process exited!");
|
||||||
Ok(())
|
Ok(())
|
||||||
@@ -584,6 +682,13 @@ fn schedule_jobs(pool: db::DbPool) {
|
|||||||
}));
|
}));
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Clean unused, expired Duo authentication contexts.
|
||||||
|
if !CONFIG.duo_context_purge_schedule().is_empty() && CONFIG._enable_duo() && !CONFIG.duo_use_iframe() {
|
||||||
|
sched.add(Job::new(CONFIG.duo_context_purge_schedule().parse().unwrap(), || {
|
||||||
|
runtime.spawn(purge_duo_contexts(pool.clone()));
|
||||||
|
}));
|
||||||
|
}
|
||||||
|
|
||||||
// Cleanup the event table of records x days old.
|
// Cleanup the event table of records x days old.
|
||||||
if CONFIG.org_events_enabled()
|
if CONFIG.org_events_enabled()
|
||||||
&& !CONFIG.event_cleanup_schedule().is_empty()
|
&& !CONFIG.event_cleanup_schedule().is_empty()
|
||||||
|
8
src/static/scripts/admin.js
vendored
8
src/static/scripts/admin.js
vendored
@@ -49,8 +49,8 @@ function _post(url, successMsg, errMsg, body, reload_page = true) {
|
|||||||
}).then(respText => {
|
}).then(respText => {
|
||||||
try {
|
try {
|
||||||
const respJson = JSON.parse(respText);
|
const respJson = JSON.parse(respText);
|
||||||
if (respJson.ErrorModel && respJson.ErrorModel.Message) {
|
if (respJson.errorModel && respJson.errorModel.message) {
|
||||||
return respJson.ErrorModel.Message;
|
return respJson.errorModel.message;
|
||||||
} else {
|
} else {
|
||||||
return Promise.reject({ body: `${respStatus} - ${respStatusText}\n\nUnknown error`, error: true });
|
return Promise.reject({ body: `${respStatus} - ${respStatusText}\n\nUnknown error`, error: true });
|
||||||
}
|
}
|
||||||
@@ -98,7 +98,7 @@ const showActiveTheme = (theme, focus = false) => {
|
|||||||
const themeSwitcherText = document.querySelector("#bd-theme-text");
|
const themeSwitcherText = document.querySelector("#bd-theme-text");
|
||||||
const activeThemeIcon = document.querySelector(".theme-icon-active use");
|
const activeThemeIcon = document.querySelector(".theme-icon-active use");
|
||||||
const btnToActive = document.querySelector(`[data-bs-theme-value="${theme}"]`);
|
const btnToActive = document.querySelector(`[data-bs-theme-value="${theme}"]`);
|
||||||
const svgOfActiveBtn = btnToActive.querySelector("span use").innerText;
|
const svgOfActiveBtn = btnToActive.querySelector("span use").textContent;
|
||||||
|
|
||||||
document.querySelectorAll("[data-bs-theme-value]").forEach(element => {
|
document.querySelectorAll("[data-bs-theme-value]").forEach(element => {
|
||||||
element.classList.remove("active");
|
element.classList.remove("active");
|
||||||
@@ -107,7 +107,7 @@ const showActiveTheme = (theme, focus = false) => {
|
|||||||
|
|
||||||
btnToActive.classList.add("active");
|
btnToActive.classList.add("active");
|
||||||
btnToActive.setAttribute("aria-pressed", "true");
|
btnToActive.setAttribute("aria-pressed", "true");
|
||||||
activeThemeIcon.innerText = svgOfActiveBtn;
|
activeThemeIcon.textContent = svgOfActiveBtn;
|
||||||
const themeSwitcherLabel = `${themeSwitcherText.textContent} (${btnToActive.dataset.bsThemeValue})`;
|
const themeSwitcherLabel = `${themeSwitcherText.textContent} (${btnToActive.dataset.bsThemeValue})`;
|
||||||
themeSwitcher.setAttribute("aria-label", themeSwitcherLabel);
|
themeSwitcher.setAttribute("aria-label", themeSwitcherLabel);
|
||||||
|
|
||||||
|
10
src/static/scripts/admin_diagnostics.js
vendored
10
src/static/scripts/admin_diagnostics.js
vendored
@@ -117,7 +117,7 @@ async function generateSupportString(event, dj) {
|
|||||||
supportString += `\n**Environment settings which are overridden:** ${dj.overrides}\n`;
|
supportString += `\n**Environment settings which are overridden:** ${dj.overrides}\n`;
|
||||||
supportString += "\n\n```json\n" + JSON.stringify(configJson, undefined, 2) + "\n```\n</details>\n";
|
supportString += "\n\n```json\n" + JSON.stringify(configJson, undefined, 2) + "\n```\n</details>\n";
|
||||||
|
|
||||||
document.getElementById("support-string").innerText = supportString;
|
document.getElementById("support-string").textContent = supportString;
|
||||||
document.getElementById("support-string").classList.remove("d-none");
|
document.getElementById("support-string").classList.remove("d-none");
|
||||||
document.getElementById("copy-support").classList.remove("d-none");
|
document.getElementById("copy-support").classList.remove("d-none");
|
||||||
}
|
}
|
||||||
@@ -126,7 +126,7 @@ function copyToClipboard(event) {
|
|||||||
event.preventDefault();
|
event.preventDefault();
|
||||||
event.stopPropagation();
|
event.stopPropagation();
|
||||||
|
|
||||||
const supportStr = document.getElementById("support-string").innerText;
|
const supportStr = document.getElementById("support-string").textContent;
|
||||||
const tmpCopyEl = document.createElement("textarea");
|
const tmpCopyEl = document.createElement("textarea");
|
||||||
|
|
||||||
tmpCopyEl.setAttribute("id", "copy-support-string");
|
tmpCopyEl.setAttribute("id", "copy-support-string");
|
||||||
@@ -201,7 +201,7 @@ function checkDns(dns_resolved) {
|
|||||||
|
|
||||||
function init(dj) {
|
function init(dj) {
|
||||||
// Time check
|
// Time check
|
||||||
document.getElementById("time-browser-string").innerText = browserUTC;
|
document.getElementById("time-browser-string").textContent = browserUTC;
|
||||||
|
|
||||||
// Check if we were able to fetch a valid NTP Time
|
// Check if we were able to fetch a valid NTP Time
|
||||||
// If so, compare both browser and server with NTP
|
// If so, compare both browser and server with NTP
|
||||||
@@ -217,7 +217,7 @@ function init(dj) {
|
|||||||
|
|
||||||
// Domain check
|
// Domain check
|
||||||
const browserURL = location.href.toLowerCase();
|
const browserURL = location.href.toLowerCase();
|
||||||
document.getElementById("domain-browser-string").innerText = browserURL;
|
document.getElementById("domain-browser-string").textContent = browserURL;
|
||||||
checkDomain(browserURL, dj.admin_url.toLowerCase());
|
checkDomain(browserURL, dj.admin_url.toLowerCase());
|
||||||
|
|
||||||
// Version check
|
// Version check
|
||||||
@@ -229,7 +229,7 @@ function init(dj) {
|
|||||||
|
|
||||||
// onLoad events
|
// onLoad events
|
||||||
document.addEventListener("DOMContentLoaded", (event) => {
|
document.addEventListener("DOMContentLoaded", (event) => {
|
||||||
const diag_json = JSON.parse(document.getElementById("diagnostics_json").innerText);
|
const diag_json = JSON.parse(document.getElementById("diagnostics_json").textContent);
|
||||||
init(diag_json);
|
init(diag_json);
|
||||||
|
|
||||||
const btnGenSupport = document.getElementById("gen-support");
|
const btnGenSupport = document.getElementById("gen-support");
|
||||||
|
2
src/static/scripts/admin_settings.js
vendored
2
src/static/scripts/admin_settings.js
vendored
@@ -122,7 +122,7 @@ function submitTestEmailOnEnter() {
|
|||||||
function colorRiskSettings() {
|
function colorRiskSettings() {
|
||||||
const risk_items = document.getElementsByClassName("col-form-label");
|
const risk_items = document.getElementsByClassName("col-form-label");
|
||||||
Array.from(risk_items).forEach((el) => {
|
Array.from(risk_items).forEach((el) => {
|
||||||
if (el.innerText.toLowerCase().includes("risks") ) {
|
if (el.textContent.toLowerCase().includes("risks") ) {
|
||||||
el.parentElement.className += " alert-danger";
|
el.parentElement.className += " alert-danger";
|
||||||
}
|
}
|
||||||
});
|
});
|
||||||
|
6
src/static/scripts/admin_users.js
vendored
6
src/static/scripts/admin_users.js
vendored
@@ -198,7 +198,8 @@ userOrgTypeDialog.addEventListener("show.bs.modal", function(event) {
|
|||||||
const orgName = event.relatedTarget.dataset.vwOrgName;
|
const orgName = event.relatedTarget.dataset.vwOrgName;
|
||||||
const orgUuid = event.relatedTarget.dataset.vwOrgUuid;
|
const orgUuid = event.relatedTarget.dataset.vwOrgUuid;
|
||||||
|
|
||||||
document.getElementById("userOrgTypeDialogTitle").innerHTML = `<b>Update User Type:</b><br><b>Organization:</b> ${orgName}<br><b>User:</b> ${userEmail}`;
|
document.getElementById("userOrgTypeDialogOrgName").textContent = orgName;
|
||||||
|
document.getElementById("userOrgTypeDialogUserEmail").textContent = userEmail;
|
||||||
document.getElementById("userOrgTypeUserUuid").value = userUuid;
|
document.getElementById("userOrgTypeUserUuid").value = userUuid;
|
||||||
document.getElementById("userOrgTypeOrgUuid").value = orgUuid;
|
document.getElementById("userOrgTypeOrgUuid").value = orgUuid;
|
||||||
document.getElementById(`userOrgType${userOrgTypeName}`).checked = true;
|
document.getElementById(`userOrgType${userOrgTypeName}`).checked = true;
|
||||||
@@ -206,7 +207,8 @@ userOrgTypeDialog.addEventListener("show.bs.modal", function(event) {
|
|||||||
|
|
||||||
// Prevent accidental submission of the form with valid elements after the modal has been hidden.
|
// Prevent accidental submission of the form with valid elements after the modal has been hidden.
|
||||||
userOrgTypeDialog.addEventListener("hide.bs.modal", function() {
|
userOrgTypeDialog.addEventListener("hide.bs.modal", function() {
|
||||||
document.getElementById("userOrgTypeDialogTitle").innerHTML = "";
|
document.getElementById("userOrgTypeDialogOrgName").textContent = "";
|
||||||
|
document.getElementById("userOrgTypeDialogUserEmail").textContent = "";
|
||||||
document.getElementById("userOrgTypeUserUuid").value = "";
|
document.getElementById("userOrgTypeUserUuid").value = "";
|
||||||
document.getElementById("userOrgTypeOrgUuid").value = "";
|
document.getElementById("userOrgTypeOrgUuid").value = "";
|
||||||
}, false);
|
}, false);
|
||||||
|
4
src/static/scripts/datatables.css
vendored
4
src/static/scripts/datatables.css
vendored
@@ -4,10 +4,10 @@
|
|||||||
*
|
*
|
||||||
* To rebuild or modify this file with the latest versions of the included
|
* To rebuild or modify this file with the latest versions of the included
|
||||||
* software please visit:
|
* software please visit:
|
||||||
* https://datatables.net/download/#bs5/dt-2.0.7
|
* https://datatables.net/download/#bs5/dt-2.0.8
|
||||||
*
|
*
|
||||||
* Included libraries:
|
* Included libraries:
|
||||||
* DataTables 2.0.7
|
* DataTables 2.0.8
|
||||||
*/
|
*/
|
||||||
|
|
||||||
@charset "UTF-8";
|
@charset "UTF-8";
|
||||||
|
53
src/static/scripts/datatables.js
vendored
53
src/static/scripts/datatables.js
vendored
@@ -4,20 +4,20 @@
|
|||||||
*
|
*
|
||||||
* To rebuild or modify this file with the latest versions of the included
|
* To rebuild or modify this file with the latest versions of the included
|
||||||
* software please visit:
|
* software please visit:
|
||||||
* https://datatables.net/download/#bs5/dt-2.0.7
|
* https://datatables.net/download/#bs5/dt-2.0.8
|
||||||
*
|
*
|
||||||
* Included libraries:
|
* Included libraries:
|
||||||
* DataTables 2.0.7
|
* DataTables 2.0.8
|
||||||
*/
|
*/
|
||||||
|
|
||||||
/*! DataTables 2.0.7
|
/*! DataTables 2.0.8
|
||||||
* © SpryMedia Ltd - datatables.net/license
|
* © SpryMedia Ltd - datatables.net/license
|
||||||
*/
|
*/
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* @summary DataTables
|
* @summary DataTables
|
||||||
* @description Paginate, search and order HTML tables
|
* @description Paginate, search and order HTML tables
|
||||||
* @version 2.0.7
|
* @version 2.0.8
|
||||||
* @author SpryMedia Ltd
|
* @author SpryMedia Ltd
|
||||||
* @contact www.datatables.net
|
* @contact www.datatables.net
|
||||||
* @copyright SpryMedia Ltd.
|
* @copyright SpryMedia Ltd.
|
||||||
@@ -563,7 +563,7 @@
|
|||||||
*
|
*
|
||||||
* @type string
|
* @type string
|
||||||
*/
|
*/
|
||||||
builder: "bs5/dt-2.0.7",
|
builder: "bs5/dt-2.0.8",
|
||||||
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
@@ -7572,6 +7572,16 @@
|
|||||||
order = opts.order, // applied, current, index (original - compatibility with 1.9)
|
order = opts.order, // applied, current, index (original - compatibility with 1.9)
|
||||||
page = opts.page; // all, current
|
page = opts.page; // all, current
|
||||||
|
|
||||||
|
if ( _fnDataSource( settings ) == 'ssp' ) {
|
||||||
|
// In server-side processing mode, most options are irrelevant since
|
||||||
|
// rows not shown don't exist and the index order is the applied order
|
||||||
|
// Removed is a special case - for consistency just return an empty
|
||||||
|
// array
|
||||||
|
return search === 'removed' ?
|
||||||
|
[] :
|
||||||
|
_range( 0, displayMaster.length );
|
||||||
|
}
|
||||||
|
|
||||||
if ( page == 'current' ) {
|
if ( page == 'current' ) {
|
||||||
// Current page implies that order=current and filter=applied, since it is
|
// Current page implies that order=current and filter=applied, since it is
|
||||||
// fairly senseless otherwise, regardless of what order and search actually
|
// fairly senseless otherwise, regardless of what order and search actually
|
||||||
@@ -8243,7 +8253,7 @@
|
|||||||
_api_register( _child_obj+'.isShown()', function () {
|
_api_register( _child_obj+'.isShown()', function () {
|
||||||
var ctx = this.context;
|
var ctx = this.context;
|
||||||
|
|
||||||
if ( ctx.length && this.length ) {
|
if ( ctx.length && this.length && ctx[0].aoData[ this[0] ] ) {
|
||||||
// _detailsShown as false or undefined will fall through to return false
|
// _detailsShown as false or undefined will fall through to return false
|
||||||
return ctx[0].aoData[ this[0] ]._detailsShow || false;
|
return ctx[0].aoData[ this[0] ]._detailsShow || false;
|
||||||
}
|
}
|
||||||
@@ -8266,7 +8276,7 @@
|
|||||||
// can be an array of these items, comma separated list, or an array of comma
|
// can be an array of these items, comma separated list, or an array of comma
|
||||||
// separated lists
|
// separated lists
|
||||||
|
|
||||||
var __re_column_selector = /^([^:]+):(name|title|visIdx|visible)$/;
|
var __re_column_selector = /^([^:]+)?:(name|title|visIdx|visible)$/;
|
||||||
|
|
||||||
|
|
||||||
// r1 and r2 are redundant - but it means that the parameters match for the
|
// r1 and r2 are redundant - but it means that the parameters match for the
|
||||||
@@ -8338,17 +8348,24 @@
|
|||||||
switch( match[2] ) {
|
switch( match[2] ) {
|
||||||
case 'visIdx':
|
case 'visIdx':
|
||||||
case 'visible':
|
case 'visible':
|
||||||
var idx = parseInt( match[1], 10 );
|
if (match[1]) {
|
||||||
// Visible index given, convert to column index
|
var idx = parseInt( match[1], 10 );
|
||||||
if ( idx < 0 ) {
|
// Visible index given, convert to column index
|
||||||
// Counting from the right
|
if ( idx < 0 ) {
|
||||||
var visColumns = columns.map( function (col,i) {
|
// Counting from the right
|
||||||
return col.bVisible ? i : null;
|
var visColumns = columns.map( function (col,i) {
|
||||||
} );
|
return col.bVisible ? i : null;
|
||||||
return [ visColumns[ visColumns.length + idx ] ];
|
} );
|
||||||
|
return [ visColumns[ visColumns.length + idx ] ];
|
||||||
|
}
|
||||||
|
// Counting from the left
|
||||||
|
return [ _fnVisibleToColumnIndex( settings, idx ) ];
|
||||||
}
|
}
|
||||||
// Counting from the left
|
|
||||||
return [ _fnVisibleToColumnIndex( settings, idx ) ];
|
// `:visible` on its own
|
||||||
|
return columns.map( function (col, i) {
|
||||||
|
return col.bVisible ? i : null;
|
||||||
|
} );
|
||||||
|
|
||||||
case 'name':
|
case 'name':
|
||||||
// match by name. `names` is column index complete and in order
|
// match by name. `names` is column index complete and in order
|
||||||
@@ -9623,7 +9640,7 @@
|
|||||||
* @type string
|
* @type string
|
||||||
* @default Version number
|
* @default Version number
|
||||||
*/
|
*/
|
||||||
DataTable.version = "2.0.7";
|
DataTable.version = "2.0.8";
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Private data store, containing all of the settings objects that are
|
* Private data store, containing all of the settings objects that are
|
||||||
|
@@ -44,7 +44,7 @@
|
|||||||
<span class="d-block"><strong>Events:</strong> {{event_count}}</span>
|
<span class="d-block"><strong>Events:</strong> {{event_count}}</span>
|
||||||
</td>
|
</td>
|
||||||
<td class="text-end px-0 small">
|
<td class="text-end px-0 small">
|
||||||
<button type="button" class="btn btn-sm btn-link p-0 border-0 float-right" vw-delete-organization data-vw-org-uuid="{{jsesc id no_quote}}" data-vw-org-name="{{jsesc name no_quote}}" data-vw-billing-email="{{jsesc billingEmail no_quote}}">Delete Organization</button><br>
|
<button type="button" class="btn btn-sm btn-link p-0 border-0 float-right" vw-delete-organization data-vw-org-uuid="{{id}}" data-vw-org-name="{{name}}" data-vw-billing-email="{{billingEmail}}">Delete Organization</button><br>
|
||||||
</td>
|
</td>
|
||||||
</tr>
|
</tr>
|
||||||
{{/each}}
|
{{/each}}
|
||||||
|
@@ -54,14 +54,14 @@
|
|||||||
{{/if}}
|
{{/if}}
|
||||||
</td>
|
</td>
|
||||||
<td>
|
<td>
|
||||||
<div class="overflow-auto vw-org-cell" data-vw-user-email="{{jsesc email no_quote}}" data-vw-user-uuid="{{jsesc id no_quote}}">
|
<div class="overflow-auto vw-org-cell" data-vw-user-email="{{email}}" data-vw-user-uuid="{{id}}">
|
||||||
{{#each organizations}}
|
{{#each organizations}}
|
||||||
<button class="badge" data-bs-toggle="modal" data-bs-target="#userOrgTypeDialog" data-vw-org-type="{{type}}" data-vw-org-uuid="{{jsesc id no_quote}}" data-vw-org-name="{{jsesc name no_quote}}">{{name}}</button>
|
<button class="badge" data-bs-toggle="modal" data-bs-target="#userOrgTypeDialog" data-vw-org-type="{{type}}" data-vw-org-uuid="{{id}}" data-vw-org-name="{{name}}">{{name}}</button>
|
||||||
{{/each}}
|
{{/each}}
|
||||||
</div>
|
</div>
|
||||||
</td>
|
</td>
|
||||||
<td class="text-end px-0 small">
|
<td class="text-end px-0 small">
|
||||||
<span data-vw-user-uuid="{{jsesc id no_quote}}" data-vw-user-email="{{jsesc email no_quote}}">
|
<span data-vw-user-uuid="{{id}}" data-vw-user-email="{{email}}">
|
||||||
{{#if twoFactorEnabled}}
|
{{#if twoFactorEnabled}}
|
||||||
<button type="button" class="btn btn-sm btn-link p-0 border-0 float-right" vw-remove2fa>Remove all 2FA</button><br>
|
<button type="button" class="btn btn-sm btn-link p-0 border-0 float-right" vw-remove2fa>Remove all 2FA</button><br>
|
||||||
{{/if}}
|
{{/if}}
|
||||||
@@ -109,7 +109,9 @@
|
|||||||
<div class="modal-dialog modal-dialog-centered modal-sm">
|
<div class="modal-dialog modal-dialog-centered modal-sm">
|
||||||
<div class="modal-content">
|
<div class="modal-content">
|
||||||
<div class="modal-header">
|
<div class="modal-header">
|
||||||
<h6 class="modal-title" id="userOrgTypeDialogTitle"></h6>
|
<h6 class="modal-title">
|
||||||
|
<b>Update User Type:</b><br><b>Organization:</b> <span id="userOrgTypeDialogOrgName"></span><br><b>User:</b> <span id="userOrgTypeDialogUserEmail"></span>
|
||||||
|
</h6>
|
||||||
<button type="button" class="btn-close" data-bs-dismiss="modal" aria-label="Close"></button>
|
<button type="button" class="btn-close" data-bs-dismiss="modal" aria-label="Close"></button>
|
||||||
</div>
|
</div>
|
||||||
<form class="form" id="userOrgTypeForm">
|
<form class="form" id="userOrgTypeForm">
|
||||||
|
@@ -7,7 +7,7 @@
|
|||||||
|
|
||||||
<table class="footer" cellpadding="0" cellspacing="0" width="100%" style="-webkit-font-smoothing: antialiased; -webkit-text-size-adjust: none; box-sizing: border-box; clear: both; color: #999; font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; font-size: 12px; line-height: 20px; margin: 0; width: 100%;">
|
<table class="footer" cellpadding="0" cellspacing="0" width="100%" style="-webkit-font-smoothing: antialiased; -webkit-text-size-adjust: none; box-sizing: border-box; clear: both; color: #999; font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; font-size: 12px; line-height: 20px; margin: 0; width: 100%;">
|
||||||
<tr style="-webkit-font-smoothing: antialiased; -webkit-text-size-adjust: none; box-sizing: border-box; color: #333; font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; font-size: 16px; line-height: 25px; margin: 0;">
|
<tr style="-webkit-font-smoothing: antialiased; -webkit-text-size-adjust: none; box-sizing: border-box; color: #333; font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; font-size: 16px; line-height: 25px; margin: 0;">
|
||||||
<td class="aligncenter social-icons" align="center" style="-webkit-font-smoothing: antialiased; -webkit-text-size-adjust: none; box-sizing: border-box; color: #999; font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; font-size: 12px; line-height: 20px; margin: 0; padding: 15px 0 0 0;" valign="top">
|
<td class="aligncenter social-icons" align="center" style="-webkit-font-smoothing: antialiased; -webkit-text-size-adjust: none; box-sizing: border-box; color: #999; font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; font-size: 12px; line-height: 20px; margin: 0; padding: 5px 0 20px 0;" valign="top">
|
||||||
<table cellpadding="0" cellspacing="0" style="-webkit-font-smoothing: antialiased; -webkit-text-size-adjust: none; box-sizing: border-box; color: #333; font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; font-size: 16px; line-height: 25px; margin: 0 auto;">
|
<table cellpadding="0" cellspacing="0" style="-webkit-font-smoothing: antialiased; -webkit-text-size-adjust: none; box-sizing: border-box; color: #333; font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; font-size: 16px; line-height: 25px; margin: 0 auto;">
|
||||||
<tr style="-webkit-font-smoothing: antialiased; -webkit-text-size-adjust: none; box-sizing: border-box; color: #333; font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; font-size: 16px; line-height: 25px; margin: 0;">
|
<tr style="-webkit-font-smoothing: antialiased; -webkit-text-size-adjust: none; box-sizing: border-box; color: #333; font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; font-size: 16px; line-height: 25px; margin: 0;">
|
||||||
<td style="-webkit-font-smoothing: antialiased; -webkit-text-size-adjust: none; box-sizing: border-box; color: #999; font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; font-size: 12px; line-height: 20px; margin: 0; padding: 0 10px;" valign="top"><a href="https://github.com/dani-garcia/vaultwarden" target="_blank" style="-webkit-font-smoothing: antialiased; -webkit-text-size-adjust: none; box-sizing: border-box; color: #999; font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; font-size: 12px; line-height: 20px; margin: 0; text-decoration: underline;"><img src="{{img_src}}mail-github.png" alt="GitHub" width="30" height="30" style="-webkit-font-smoothing: antialiased; -webkit-text-size-adjust: none; border: none; box-sizing: border-box; color: #333; font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; font-size: 16px; line-height: 25px; margin: 0; max-width: 100%;" /></a></td>
|
<td style="-webkit-font-smoothing: antialiased; -webkit-text-size-adjust: none; box-sizing: border-box; color: #999; font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; font-size: 12px; line-height: 20px; margin: 0; padding: 0 10px;" valign="top"><a href="https://github.com/dani-garcia/vaultwarden" target="_blank" style="-webkit-font-smoothing: antialiased; -webkit-text-size-adjust: none; box-sizing: border-box; color: #999; font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; font-size: 12px; line-height: 20px; margin: 0; text-decoration: underline;"><img src="{{img_src}}mail-github.png" alt="GitHub" width="30" height="30" style="-webkit-font-smoothing: antialiased; -webkit-text-size-adjust: none; border: none; box-sizing: border-box; color: #333; font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; font-size: 16px; line-height: 25px; margin: 0; max-width: 100%;" /></a></td>
|
||||||
|
@@ -1,10 +1,11 @@
|
|||||||
Incomplete Two-Step Login From {{{device}}}
|
Incomplete Two-Step Login From {{{device_name}}}
|
||||||
<!---------------->
|
<!---------------->
|
||||||
Someone attempted to log into your account with the correct master password, but did not provide the correct token or action required to complete the two-step login process within {{time_limit}} minutes of the initial login attempt.
|
Someone attempted to log into your account with the correct master password, but did not provide the correct token or action required to complete the two-step login process within {{time_limit}} minutes of the initial login attempt.
|
||||||
|
|
||||||
* Date: {{datetime}}
|
* Date: {{datetime}}
|
||||||
* IP Address: {{ip}}
|
* IP Address: {{ip}}
|
||||||
* Device Type: {{device}}
|
* Device Name: {{device_name}}
|
||||||
|
* Device Type: {{device_type}}
|
||||||
|
|
||||||
If this was not you or someone you authorized, then you should change your master password as soon as possible, as it is likely to be compromised.
|
If this was not you or someone you authorized, then you should change your master password as soon as possible, as it is likely to be compromised.
|
||||||
{{> email/email_footer_text }}
|
{{> email/email_footer_text }}
|
||||||
|
@@ -1,4 +1,4 @@
|
|||||||
Incomplete Two-Step Login From {{{device}}}
|
Incomplete Two-Step Login From {{{device_name}}}
|
||||||
<!---------------->
|
<!---------------->
|
||||||
{{> email/email_header }}
|
{{> email/email_header }}
|
||||||
<table width="100%" cellpadding="0" cellspacing="0" style="margin: 0; font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; box-sizing: border-box; font-size: 16px; color: #333; line-height: 25px; -webkit-font-smoothing: antialiased; -webkit-text-size-adjust: none;">
|
<table width="100%" cellpadding="0" cellspacing="0" style="margin: 0; font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; box-sizing: border-box; font-size: 16px; color: #333; line-height: 25px; -webkit-font-smoothing: antialiased; -webkit-text-size-adjust: none;">
|
||||||
@@ -19,7 +19,12 @@ Incomplete Two-Step Login From {{{device}}}
|
|||||||
</tr>
|
</tr>
|
||||||
<tr style="margin: 0; font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; box-sizing: border-box; font-size: 16px; color: #333; line-height: 25px; -webkit-font-smoothing: antialiased; -webkit-text-size-adjust: none;">
|
<tr style="margin: 0; font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; box-sizing: border-box; font-size: 16px; color: #333; line-height: 25px; -webkit-font-smoothing: antialiased; -webkit-text-size-adjust: none;">
|
||||||
<td class="content-block" style="font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; box-sizing: border-box; font-size: 16px; color: #333; line-height: 25px; margin: 0; -webkit-font-smoothing: antialiased; padding: 0 0 10px; -webkit-text-size-adjust: none;" valign="top">
|
<td class="content-block" style="font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; box-sizing: border-box; font-size: 16px; color: #333; line-height: 25px; margin: 0; -webkit-font-smoothing: antialiased; padding: 0 0 10px; -webkit-text-size-adjust: none;" valign="top">
|
||||||
<b>Device Type:</b> {{device}}
|
<b>Device Name:</b> {{device_name}}
|
||||||
|
</td>
|
||||||
|
</tr>
|
||||||
|
<tr style="margin: 0; font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; box-sizing: border-box; font-size: 16px; color: #333; line-height: 25px; -webkit-font-smoothing: antialiased; -webkit-text-size-adjust: none;">
|
||||||
|
<td class="content-block" style="font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; box-sizing: border-box; font-size: 16px; color: #333; line-height: 25px; margin: 0; -webkit-font-smoothing: antialiased; padding: 0 0 10px; -webkit-text-size-adjust: none;" valign="top">
|
||||||
|
<b>Device Type:</b> {{device_type}}
|
||||||
</td>
|
</td>
|
||||||
</tr>
|
</tr>
|
||||||
<tr style="margin: 0; font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; box-sizing: border-box; font-size: 16px; color: #333; line-height: 25px; -webkit-font-smoothing: antialiased; -webkit-text-size-adjust: none;">
|
<tr style="margin: 0; font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; box-sizing: border-box; font-size: 16px; color: #333; line-height: 25px; -webkit-font-smoothing: antialiased; -webkit-text-size-adjust: none;">
|
||||||
|
@@ -1,10 +1,11 @@
|
|||||||
New Device Logged In From {{{device}}}
|
New Device Logged In From {{{device_name}}}
|
||||||
<!---------------->
|
<!---------------->
|
||||||
Your account was just logged into from a new device.
|
Your account was just logged into from a new device.
|
||||||
|
|
||||||
* Date: {{datetime}}
|
* Date: {{datetime}}
|
||||||
* IP Address: {{ip}}
|
* IP Address: {{ip}}
|
||||||
* Device Type: {{device}}
|
* Device Name: {{device_name}}
|
||||||
|
* Device Type: {{device_type}}
|
||||||
|
|
||||||
You can deauthorize all devices that have access to your account from the web vault ( {{url}} ) under Settings > My Account > Deauthorize Sessions.
|
You can deauthorize all devices that have access to your account from the web vault ( {{url}} ) under Settings > My Account > Deauthorize Sessions.
|
||||||
{{> email/email_footer_text }}
|
{{> email/email_footer_text }}
|
@@ -1,4 +1,4 @@
|
|||||||
New Device Logged In From {{{device}}}
|
New Device Logged In From {{{device_name}}}
|
||||||
<!---------------->
|
<!---------------->
|
||||||
{{> email/email_header }}
|
{{> email/email_header }}
|
||||||
<table width="100%" cellpadding="0" cellspacing="0" style="margin: 0; font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; box-sizing: border-box; font-size: 16px; color: #333; line-height: 25px; -webkit-font-smoothing: antialiased; -webkit-text-size-adjust: none;">
|
<table width="100%" cellpadding="0" cellspacing="0" style="margin: 0; font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; box-sizing: border-box; font-size: 16px; color: #333; line-height: 25px; -webkit-font-smoothing: antialiased; -webkit-text-size-adjust: none;">
|
||||||
@@ -9,7 +9,7 @@ New Device Logged In From {{{device}}}
|
|||||||
</tr>
|
</tr>
|
||||||
<tr style="margin: 0; font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; box-sizing: border-box; font-size: 16px; color: #333; line-height: 25px; -webkit-font-smoothing: antialiased; -webkit-text-size-adjust: none;">
|
<tr style="margin: 0; font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; box-sizing: border-box; font-size: 16px; color: #333; line-height: 25px; -webkit-font-smoothing: antialiased; -webkit-text-size-adjust: none;">
|
||||||
<td class="content-block" style="font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; box-sizing: border-box; font-size: 16px; color: #333; line-height: 25px; margin: 0; -webkit-font-smoothing: antialiased; padding: 0 0 10px; -webkit-text-size-adjust: none;" valign="top">
|
<td class="content-block" style="font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; box-sizing: border-box; font-size: 16px; color: #333; line-height: 25px; margin: 0; -webkit-font-smoothing: antialiased; padding: 0 0 10px; -webkit-text-size-adjust: none;" valign="top">
|
||||||
<b>Date</b>: {{datetime}}
|
<b>Date:</b> {{datetime}}
|
||||||
</td>
|
</td>
|
||||||
</tr>
|
</tr>
|
||||||
<tr style="margin: 0; font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; box-sizing: border-box; font-size: 16px; color: #333; line-height: 25px; -webkit-font-smoothing: antialiased; -webkit-text-size-adjust: none;">
|
<tr style="margin: 0; font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; box-sizing: border-box; font-size: 16px; color: #333; line-height: 25px; -webkit-font-smoothing: antialiased; -webkit-text-size-adjust: none;">
|
||||||
@@ -19,7 +19,12 @@ New Device Logged In From {{{device}}}
|
|||||||
</tr>
|
</tr>
|
||||||
<tr style="margin: 0; font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; box-sizing: border-box; font-size: 16px; color: #333; line-height: 25px; -webkit-font-smoothing: antialiased; -webkit-text-size-adjust: none;">
|
<tr style="margin: 0; font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; box-sizing: border-box; font-size: 16px; color: #333; line-height: 25px; -webkit-font-smoothing: antialiased; -webkit-text-size-adjust: none;">
|
||||||
<td class="content-block" style="font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; box-sizing: border-box; font-size: 16px; color: #333; line-height: 25px; margin: 0; -webkit-font-smoothing: antialiased; padding: 0 0 10px; -webkit-text-size-adjust: none;" valign="top">
|
<td class="content-block" style="font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; box-sizing: border-box; font-size: 16px; color: #333; line-height: 25px; margin: 0; -webkit-font-smoothing: antialiased; padding: 0 0 10px; -webkit-text-size-adjust: none;" valign="top">
|
||||||
<b>Device Type:</b> {{device}}
|
<b>Device Name:</b> {{device_name}}
|
||||||
|
</td>
|
||||||
|
</tr>
|
||||||
|
<tr style="margin: 0; font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; box-sizing: border-box; font-size: 16px; color: #333; line-height: 25px; -webkit-font-smoothing: antialiased; -webkit-text-size-adjust: none;">
|
||||||
|
<td class="content-block" style="font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; box-sizing: border-box; font-size: 16px; color: #333; line-height: 25px; margin: 0; -webkit-font-smoothing: antialiased; padding: 0 0 10px; -webkit-text-size-adjust: none;" valign="top">
|
||||||
|
<b>Device Type:</b> {{device_type}}
|
||||||
</td>
|
</td>
|
||||||
</tr>
|
</tr>
|
||||||
<tr style="margin: 0; font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; box-sizing: border-box; font-size: 16px; color: #333; line-height: 25px; -webkit-font-smoothing: antialiased; -webkit-text-size-adjust: none;">
|
<tr style="margin: 0; font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; box-sizing: border-box; font-size: 16px; color: #333; line-height: 25px; -webkit-font-smoothing: antialiased; -webkit-text-size-adjust: none;">
|
||||||
|
@@ -3,7 +3,7 @@ Join {{{org_name}}}
|
|||||||
You have been invited to join the *{{org_name}}* organization.
|
You have been invited to join the *{{org_name}}* organization.
|
||||||
|
|
||||||
|
|
||||||
Click here to join: {{url}}/#/accept-organization/?organizationId={{org_id}}&organizationUserId={{org_user_id}}&email={{email}}&organizationName={{org_name_encoded}}&token={{token}}
|
Click here to join: {{url}}
|
||||||
|
|
||||||
|
|
||||||
If you do not wish to join this organization, you can safely ignore this email.
|
If you do not wish to join this organization, you can safely ignore this email.
|
||||||
|
@@ -9,7 +9,7 @@ Join {{{org_name}}}
|
|||||||
</tr>
|
</tr>
|
||||||
<tr style="margin: 0; font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; box-sizing: border-box; font-size: 16px; color: #333; line-height: 25px; -webkit-font-smoothing: antialiased; -webkit-text-size-adjust: none;">
|
<tr style="margin: 0; font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; box-sizing: border-box; font-size: 16px; color: #333; line-height: 25px; -webkit-font-smoothing: antialiased; -webkit-text-size-adjust: none;">
|
||||||
<td class="content-block" style="font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; box-sizing: border-box; font-size: 16px; color: #333; line-height: 25px; margin: 0; -webkit-font-smoothing: antialiased; padding: 0 0 10px; -webkit-text-size-adjust: none; text-align: center;" valign="top" align="center">
|
<td class="content-block" style="font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; box-sizing: border-box; font-size: 16px; color: #333; line-height: 25px; margin: 0; -webkit-font-smoothing: antialiased; padding: 0 0 10px; -webkit-text-size-adjust: none; text-align: center;" valign="top" align="center">
|
||||||
<a href="{{url}}/#/accept-organization/?organizationId={{org_id}}&organizationUserId={{org_user_id}}&email={{email}}&organizationName={{org_name_encoded}}&token={{token}}"
|
<a href="{{url}}"
|
||||||
clicktracking=off target="_blank" style="color: #ffffff; text-decoration: none; text-align: center; cursor: pointer; display: inline-block; border-radius: 5px; background-color: #3c8dbc; border-color: #3c8dbc; border-style: solid; border-width: 10px 20px; margin: 0; font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; box-sizing: border-box; font-size: 16px; line-height: 25px; -webkit-font-smoothing: antialiased; -webkit-text-size-adjust: none;">
|
clicktracking=off target="_blank" style="color: #ffffff; text-decoration: none; text-align: center; cursor: pointer; display: inline-block; border-radius: 5px; background-color: #3c8dbc; border-color: #3c8dbc; border-style: solid; border-width: 10px 20px; margin: 0; font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; box-sizing: border-box; font-size: 16px; line-height: 25px; -webkit-font-smoothing: antialiased; -webkit-text-size-adjust: none;">
|
||||||
Join Organization Now
|
Join Organization Now
|
||||||
</a>
|
</a>
|
||||||
|
179
src/util.rs
179
src/util.rs
@@ -4,7 +4,6 @@
|
|||||||
use std::{collections::HashMap, io::Cursor, ops::Deref, path::Path};
|
use std::{collections::HashMap, io::Cursor, ops::Deref, path::Path};
|
||||||
|
|
||||||
use num_traits::ToPrimitive;
|
use num_traits::ToPrimitive;
|
||||||
use once_cell::sync::Lazy;
|
|
||||||
use rocket::{
|
use rocket::{
|
||||||
fairing::{Fairing, Info, Kind},
|
fairing::{Fairing, Info, Kind},
|
||||||
http::{ContentType, Header, HeaderMap, Method, Status},
|
http::{ContentType, Header, HeaderMap, Method, Status},
|
||||||
@@ -214,7 +213,7 @@ impl<'r, R: 'r + Responder<'r, 'static> + Send> Responder<'r, 'static> for Cache
|
|||||||
};
|
};
|
||||||
res.set_raw_header("Cache-Control", cache_control_header);
|
res.set_raw_header("Cache-Control", cache_control_header);
|
||||||
|
|
||||||
let time_now = chrono::Local::now();
|
let time_now = Local::now();
|
||||||
let expiry_time = time_now + chrono::TimeDelta::try_seconds(self.ttl.try_into().unwrap()).unwrap();
|
let expiry_time = time_now + chrono::TimeDelta::try_seconds(self.ttl.try_into().unwrap()).unwrap();
|
||||||
res.set_raw_header("Expires", format_datetime_http(&expiry_time));
|
res.set_raw_header("Expires", format_datetime_http(&expiry_time));
|
||||||
Ok(res)
|
Ok(res)
|
||||||
@@ -223,8 +222,8 @@ impl<'r, R: 'r + Responder<'r, 'static> + Send> Responder<'r, 'static> for Cache
|
|||||||
|
|
||||||
pub struct SafeString(String);
|
pub struct SafeString(String);
|
||||||
|
|
||||||
impl std::fmt::Display for SafeString {
|
impl fmt::Display for SafeString {
|
||||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||||
self.0.fmt(f)
|
self.0.fmt(f)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -514,6 +513,28 @@ pub fn container_base_image() -> &'static str {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[derive(Deserialize)]
|
||||||
|
struct WebVaultVersion {
|
||||||
|
version: String,
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn get_web_vault_version() -> String {
|
||||||
|
let version_files = [
|
||||||
|
format!("{}/vw-version.json", CONFIG.web_vault_folder()),
|
||||||
|
format!("{}/version.json", CONFIG.web_vault_folder()),
|
||||||
|
];
|
||||||
|
|
||||||
|
for version_file in version_files {
|
||||||
|
if let Ok(version_str) = std::fs::read_to_string(&version_file) {
|
||||||
|
if let Ok(version) = serde_json::from_str::<WebVaultVersion>(&version_str) {
|
||||||
|
return String::from(version.version.trim_start_matches('v'));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
String::from("Version file missing")
|
||||||
|
}
|
||||||
|
|
||||||
//
|
//
|
||||||
// Deserialization methods
|
// Deserialization methods
|
||||||
//
|
//
|
||||||
@@ -591,7 +612,7 @@ impl<'de> Visitor<'de> for LowerCaseVisitor {
|
|||||||
fn _process_key(key: &str) -> String {
|
fn _process_key(key: &str) -> String {
|
||||||
match key.to_lowercase().as_ref() {
|
match key.to_lowercase().as_ref() {
|
||||||
"ssn" => "ssn".into(),
|
"ssn" => "ssn".into(),
|
||||||
_ => self::lcase_first(key),
|
_ => lcase_first(key),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -686,19 +707,6 @@ where
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
use reqwest::{header, Client, ClientBuilder};
|
|
||||||
|
|
||||||
pub fn get_reqwest_client() -> &'static Client {
|
|
||||||
static INSTANCE: Lazy<Client> = Lazy::new(|| get_reqwest_client_builder().build().expect("Failed to build client"));
|
|
||||||
&INSTANCE
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn get_reqwest_client_builder() -> ClientBuilder {
|
|
||||||
let mut headers = header::HeaderMap::new();
|
|
||||||
headers.insert(header::USER_AGENT, header::HeaderValue::from_static("Vaultwarden"));
|
|
||||||
Client::builder().default_headers(headers).timeout(Duration::from_secs(10))
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn convert_json_key_lcase_first(src_json: Value) -> Value {
|
pub fn convert_json_key_lcase_first(src_json: Value) -> Value {
|
||||||
match src_json {
|
match src_json {
|
||||||
Value::Array(elm) => {
|
Value::Array(elm) => {
|
||||||
@@ -744,144 +752,11 @@ pub fn convert_json_key_lcase_first(src_json: Value) -> Value {
|
|||||||
|
|
||||||
/// Parses the experimental client feature flags string into a HashMap.
|
/// Parses the experimental client feature flags string into a HashMap.
|
||||||
pub fn parse_experimental_client_feature_flags(experimental_client_feature_flags: &str) -> HashMap<String, bool> {
|
pub fn parse_experimental_client_feature_flags(experimental_client_feature_flags: &str) -> HashMap<String, bool> {
|
||||||
let feature_states =
|
let feature_states = experimental_client_feature_flags.split(',').map(|f| (f.trim().to_owned(), true)).collect();
|
||||||
experimental_client_feature_flags.to_lowercase().split(',').map(|f| (f.trim().to_owned(), true)).collect();
|
|
||||||
|
|
||||||
feature_states
|
feature_states
|
||||||
}
|
}
|
||||||
|
|
||||||
mod dns_resolver {
|
|
||||||
use std::{
|
|
||||||
fmt,
|
|
||||||
net::{IpAddr, SocketAddr},
|
|
||||||
sync::Arc,
|
|
||||||
};
|
|
||||||
|
|
||||||
use hickory_resolver::{system_conf::read_system_conf, TokioAsyncResolver};
|
|
||||||
use once_cell::sync::Lazy;
|
|
||||||
use reqwest::dns::{Name, Resolve, Resolving};
|
|
||||||
|
|
||||||
use crate::{util::is_global, CONFIG};
|
|
||||||
|
|
||||||
#[derive(Debug, Clone)]
|
|
||||||
pub enum CustomResolverError {
|
|
||||||
Blacklist {
|
|
||||||
domain: String,
|
|
||||||
},
|
|
||||||
NonGlobalIp {
|
|
||||||
domain: String,
|
|
||||||
ip: IpAddr,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
impl CustomResolverError {
|
|
||||||
pub fn downcast_ref(e: &dyn std::error::Error) -> Option<&Self> {
|
|
||||||
let mut source = e.source();
|
|
||||||
|
|
||||||
while let Some(err) = source {
|
|
||||||
source = err.source();
|
|
||||||
if let Some(err) = err.downcast_ref::<CustomResolverError>() {
|
|
||||||
return Some(err);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
None
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl fmt::Display for CustomResolverError {
|
|
||||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
|
||||||
match self {
|
|
||||||
Self::Blacklist {
|
|
||||||
domain,
|
|
||||||
} => write!(f, "Blacklisted domain: {domain} matched ICON_BLACKLIST_REGEX"),
|
|
||||||
Self::NonGlobalIp {
|
|
||||||
domain,
|
|
||||||
ip,
|
|
||||||
} => write!(f, "IP {ip} for domain '{domain}' is not a global IP!"),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl std::error::Error for CustomResolverError {}
|
|
||||||
|
|
||||||
#[derive(Debug, Clone)]
|
|
||||||
pub enum CustomDnsResolver {
|
|
||||||
Default(),
|
|
||||||
Hickory(Arc<TokioAsyncResolver>),
|
|
||||||
}
|
|
||||||
type BoxError = Box<dyn std::error::Error + Send + Sync>;
|
|
||||||
|
|
||||||
impl CustomDnsResolver {
|
|
||||||
pub fn instance() -> Arc<Self> {
|
|
||||||
static INSTANCE: Lazy<Arc<CustomDnsResolver>> = Lazy::new(CustomDnsResolver::new);
|
|
||||||
Arc::clone(&*INSTANCE)
|
|
||||||
}
|
|
||||||
|
|
||||||
fn new() -> Arc<Self> {
|
|
||||||
match read_system_conf() {
|
|
||||||
Ok((config, opts)) => {
|
|
||||||
let resolver = TokioAsyncResolver::tokio(config.clone(), opts.clone());
|
|
||||||
Arc::new(Self::Hickory(Arc::new(resolver)))
|
|
||||||
}
|
|
||||||
Err(e) => {
|
|
||||||
warn!("Error creating Hickory resolver, falling back to default: {e:?}");
|
|
||||||
Arc::new(Self::Default())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Note that we get an iterator of addresses, but we only grab the first one for convenience
|
|
||||||
async fn resolve_domain(&self, name: &str) -> Result<Option<SocketAddr>, BoxError> {
|
|
||||||
pre_resolve(name)?;
|
|
||||||
|
|
||||||
let result = match self {
|
|
||||||
Self::Default() => tokio::net::lookup_host(name).await?.next(),
|
|
||||||
Self::Hickory(r) => r.lookup_ip(name).await?.iter().next().map(|a| SocketAddr::new(a, 0)),
|
|
||||||
};
|
|
||||||
|
|
||||||
if let Some(addr) = &result {
|
|
||||||
post_resolve(name, addr.ip())?;
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(result)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fn pre_resolve(name: &str) -> Result<(), CustomResolverError> {
|
|
||||||
if crate::api::is_domain_blacklisted(name) {
|
|
||||||
return Err(CustomResolverError::Blacklist {
|
|
||||||
domain: name.to_string(),
|
|
||||||
});
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
fn post_resolve(name: &str, ip: IpAddr) -> Result<(), CustomResolverError> {
|
|
||||||
if CONFIG.icon_blacklist_non_global_ips() && !is_global(ip) {
|
|
||||||
Err(CustomResolverError::NonGlobalIp {
|
|
||||||
domain: name.to_string(),
|
|
||||||
ip,
|
|
||||||
})
|
|
||||||
} else {
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Resolve for CustomDnsResolver {
|
|
||||||
fn resolve(&self, name: Name) -> Resolving {
|
|
||||||
let this = self.clone();
|
|
||||||
Box::pin(async move {
|
|
||||||
let name = name.as_str();
|
|
||||||
let result = this.resolve_domain(name).await?;
|
|
||||||
Ok::<reqwest::dns::Addrs, _>(Box::new(result.into_iter()))
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub use dns_resolver::{CustomDnsResolver, CustomResolverError};
|
|
||||||
|
|
||||||
/// TODO: This is extracted from IpAddr::is_global, which is unstable:
|
/// TODO: This is extracted from IpAddr::is_global, which is unstable:
|
||||||
/// https://doc.rust-lang.org/nightly/std/net/enum.IpAddr.html#method.is_global
|
/// https://doc.rust-lang.org/nightly/std/net/enum.IpAddr.html#method.is_global
|
||||||
/// Remove once https://github.com/rust-lang/rust/issues/27709 is merged
|
/// Remove once https://github.com/rust-lang/rust/issues/27709 is merged
|
||||||
|
Reference in New Issue
Block a user