Mirror of https://github.com/dani-garcia/vaultwarden.git
Synced 2025-09-09 18:25:58 +03:00

Compare commits (34 commits)

2bb6482bec, c169095128, b1397c95ca, 3df31e3464, 638a0fd3c3, ebb66c374e,
89e3c41043, 3da410ef71, 2dccbd3412, 2ff529ed99, 4fae1e4298, f7951b44ba,
ff8eeff995, 00019dc356, 404fe5321e, e7dd239d20, 071f3370e3, ee321be579,
eb61425da5, b75ba216d1, 8651df8c2a, 948554a20f, 9cdb605659, 928e2424c0,
a01fee0b9f, 67adfee5e5, d66d4fd87f, 434551e012, 69dcbdd3b2, 422f7ccfa8,
f8ae5013cb, d8e5e53273, b6502e9e9d, d70864ac73
.env (7 lines changed)

@@ -14,6 +14,9 @@
# WEB_VAULT_FOLDER=web-vault/
# WEB_VAULT_ENABLED=true

## Controls the WebSocket server port
# WEBSOCKET_PORT=3012

## Controls if new users can register
# SIGNUPS_ALLOWED=true

@@ -42,8 +45,10 @@
# ROCKET_PORT=8000
# ROCKET_TLS={certs="/path/to/certs.pem",key="/path/to/key.pem"}

## Mail specific settings, if SMTP_HOST is specified, SMTP_USERNAME and SMTP_PASSWORD are mandatory
## Mail specific settings, set SMTP_HOST and SMTP_FROM to enable the mail service.
## Note: if SMTP_USERNAME is specified, SMTP_PASSWORD is mandatory
# SMTP_HOST=smtp.domain.tld
# SMTP_FROM=bitwarden-rs@domain.tld
# SMTP_PORT=587
# SMTP_SSL=true
# SMTP_USERNAME=username
Cargo.lock (generated, 1096 lines changed)

File diff suppressed because it is too large.
Cargo.toml (40 lines changed)

@@ -10,18 +10,27 @@ rocket_codegen = "0.3.16"
rocket_contrib = "0.3.16"

# HTTP client
reqwest = "0.8.8"
reqwest = "0.9.0"

# multipart/form-data support
multipart = "0.15.2"
multipart = "0.15.3"

# WebSockets library
ws = "0.7.8"

# MessagePack library
rmpv = "0.4.0"

# Concurrent hashmap implementation
chashmap = "2.2.0"

# A generic serialization/deserialization framework
serde = "1.0.74"
serde_derive = "1.0.74"
serde_json = "1.0.26"
serde = "1.0.79"
serde_derive = "1.0.79"
serde_json = "1.0.28"

# A safe, extensible ORM and Query builder
diesel = { version = "1.3.2", features = ["sqlite", "chrono", "r2d2"] }
diesel = { version = "1.3.3", features = ["sqlite", "chrono", "r2d2"] }
diesel_migrations = { version = "1.3.0", features = ["sqlite"] }

# Bundled SQLite
@@ -31,10 +40,10 @@ libsqlite3-sys = { version = "0.9.3", features = ["bundled"] }
ring = { version = "= 0.11.0", features = ["rsa_signing"] }

# UUID generation
uuid = { version = "0.6.5", features = ["v4"] }
uuid = { version = "0.7.1", features = ["v4"] }

# Date and time library for Rust
chrono = "0.4.5"
chrono = "0.4.6"

# TOTP library
oath = "0.10.2"
@@ -55,17 +64,24 @@ dotenv = { version = "0.13.0", default-features = false }
lazy_static = "1.1.0"

# Numerical libraries
num-traits = "0.2.5"
num-traits = "0.2.6"
num-derive = "0.2.2"

lettre = "0.8.2"
lettre_email = "0.8.2"
native-tls = "0.1.5"
# Email libraries
lettre = "0.9.0"
lettre_email = "0.9.0"
native-tls = "0.2.1"
fast_chemail = "0.9.5"

# Number encoding library
byteorder = "1.2.6"

[patch.crates-io]
# Make jwt use ring 0.11, to match rocket
jsonwebtoken = { path = "libs/jsonwebtoken" }
rmp = { git = 'https://github.com/dani-garcia/msgpack-rust' }
lettre = { git = 'https://github.com/lettre/lettre', rev = 'fc91bb6ee8f9a' }
lettre_email = { git = 'https://github.com/lettre/lettre', rev = 'fc91bb6ee8f9a' }

# Version 0.1.2 from crates.io lacks a commit that fixes a certificate error
u2f = { git = 'https://github.com/wisespace-io/u2f-rs', rev = '193de35093a44' }
@@ -4,7 +4,7 @@
####################### VAULT BUILD IMAGE #######################
FROM node:8-alpine as vault

ENV VAULT_VERSION "v2.2.0"
ENV VAULT_VERSION "v2.3.0"

ENV URL "https://github.com/bitwarden/web.git"

@@ -76,6 +76,7 @@ RUN apt-get update && apt-get install -y\
RUN mkdir /data
VOLUME /data
EXPOSE 80
EXPOSE 3012

# Copies the files from the context (env file and web-vault)
# and the binary from the "build" stage to the current stage

@@ -4,7 +4,7 @@
####################### VAULT BUILD IMAGE #######################
FROM node:8-alpine as vault

ENV VAULT_VERSION "v2.2.0"
ENV VAULT_VERSION "v2.3.0"

ENV URL "https://github.com/bitwarden/web.git"

@@ -68,6 +68,7 @@ RUN apk add \
RUN mkdir /data
VOLUME /data
EXPOSE 80
EXPOSE 3012

# Copies the files from the context (env file and web-vault)
# and the binary from the "build" stage to the current stage

@@ -4,7 +4,7 @@
####################### VAULT BUILD IMAGE #######################
FROM node:8-alpine as vault

ENV VAULT_VERSION "v2.2.0"
ENV VAULT_VERSION "v2.3.0"

ENV URL "https://github.com/bitwarden/web.git"
PROXY.md (new file, 95 lines)

@@ -0,0 +1,95 @@
# Proxy examples

In this document, `<SERVER>` refers to the IP or domain where bitwarden_rs is accessible. If both the proxy and bitwarden_rs run on the same system, simply use `localhost`.
The ports proxied by default are `80` for the web server and `3012` for the WebSocket server. The proxies are configured to listen on port `443` with HTTPS enabled, which is recommended.

When using a proxy, it's preferable to configure HTTPS at the proxy level rather than at the application level, so that the WebSocket connection is also secured.

## Caddy

```nginx
localhost:443 {
    # The negotiation endpoint is also proxied to Rocket
    proxy /notifications/hub/negotiate <SERVER>:80 {
        transparent
    }

    # Notifications redirected to the websockets server
    proxy /notifications/hub <SERVER>:3012 {
        websocket
    }

    # Proxy the Root directory to Rocket
    proxy / <SERVER>:80 {
        transparent
    }

    tls ${SSLCERTIFICATE} ${SSLKEY}
}
```

## Nginx (by shauder)
```nginx
server {
    include conf.d/ssl/ssl.conf;

    listen 443 ssl http2;
    server_name vault.*;

    location /notifications/hub/negotiate {
        include conf.d/proxy-confs/proxy.conf;
        proxy_pass http://<SERVER>:80;
    }

    location / {
        include conf.d/proxy-confs/proxy.conf;
        proxy_pass http://<SERVER>:80;
    }

    location /notifications/hub {
        proxy_pass http://<SERVER>:3012;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection "upgrade";
    }
}
```

## Apache (by fbartels)
```apache
<VirtualHost *:443>
    SSLEngine on
    ServerName bitwarden.$hostname.$domainname

    SSLCertificateFile ${SSLCERTIFICATE}
    SSLCertificateKeyFile ${SSLKEY}
    SSLCACertificateFile ${SSLCA}
    ${SSLCHAIN}

    ErrorLog \${APACHE_LOG_DIR}/bitwarden-error.log
    CustomLog \${APACHE_LOG_DIR}/bitwarden-access.log combined

    RewriteEngine On
    RewriteCond %{HTTP:Upgrade} =websocket [NC]
    RewriteRule /(.*) ws://<SERVER>:3012/$1 [P,L]

    ProxyPass / http://<SERVER>:80/

    ProxyPreserveHost On
    ProxyRequests Off
</VirtualHost>
```

## Traefik (docker-compose example)
```traefik
labels:
  - 'traefik.frontend.rule=Host:vault.example.local'
  - 'traefik.docker.network=traefik'
  - 'traefik.port=80'
  - 'traefik.enable=true'
  - 'traefik.web.frontend.rule=Host:vault.example.local'
  - 'traefik.web.port=80'
  - 'traefik.hub.frontend.rule=Path:/notifications/hub'
  - 'traefik.hub.port=3012'
  - 'traefik.negotiate.frontend.rule=Path:/notifications/hub/negotiate'
  - 'traefik.negotiate.port=80'
```
README.md (36 lines changed)

@@ -25,6 +25,7 @@ _*Note, that this project is not associated with the [Bitwarden](https://bitward
- [Disable registration of new users](#disable-registration-of-new-users)
- [Disable invitations](#disable-invitations)
- [Enabling HTTPS](#enabling-https)
- [Enabling WebSocket notifications](#enabling-websocket-notifications)
- [Enabling U2F authentication](#enabling-u2f-authentication)
- [Changing persistent data location](#changing-persistent-data-location)
  - [/data prefix:](#data-prefix)
@@ -175,6 +176,34 @@ docker run -d --name bitwarden \
```
Note that you need to mount the ssl files and forward the appropriate port.

Software used for getting certs often uses symlinks. If that is the case, both locations need to be accessible to the docker container.

Example: [certbot](https://certbot.eff.org/) will create a folder that contains the needed `cert.pem` and `privkey.pem` files in `/etc/letsencrypt/live/mydomain/`

These files are symlinked to `../../archive/mydomain/mykey.pem`

So to use them from the bitwarden container:

```sh
docker run -d --name bitwarden \
  -e ROCKET_TLS='{certs="/ssl/live/mydomain/cert.pem",key="/ssl/live/mydomain/privkey.pem"}' \
  -v /etc/letsencrypt/:/ssl/ \
  -v /bw-data/:/data/ \
  -p 443:80 \
  mprasil/bitwarden:latest
```
### Enabling WebSocket notifications
*Important: This does not apply to the mobile clients, which use push notifications.*

To enable WebSocket notifications, an external reverse proxy is necessary, and it must be configured to do the following:
- Route the `/notifications/hub` endpoint to the WebSocket server, by default at port `3012`, making sure to pass the `Connection` and `Upgrade` headers.
- Route everything else, including `/notifications/hub/negotiate`, to the standard Rocket server, by default at port `80`.
- If using Docker, you may need to map both ports with the `-p` flag (see the example below).

Example configurations are included in the [PROXY.md](https://github.com/dani-garcia/bitwarden_rs/blob/master/PROXY.md) file.

Note: The reason for this workaround is the lack of support for WebSockets from Rocket (though [it's a planned feature](https://github.com/SergioBenitez/Rocket/issues/90)), which forces us to launch a secondary server on a separate port.
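A minimal Docker invocation that exposes both ports might look like this. This is only a sketch: the image name and data mount mirror the examples above, and the reverse proxy still has to be configured as described.

```sh
docker run -d --name bitwarden \
  -v /bw-data/:/data/ \
  -p 80:80 \
  -p 3012:3012 \
  mprasil/bitwarden:latest
```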
### Enabling U2F authentication
To enable U2F authentication, you must be serving bitwarden_rs from an HTTPS domain with a valid certificate (either using the included
HTTPS options or with a reverse proxy). We recommend using a free certificate from Let's Encrypt.
@@ -287,6 +316,7 @@ You can configure bitwarden_rs to send emails via a SMTP agent:
```sh
docker run -d --name bitwarden \
  -e SMTP_HOST=<smtp.domain.tld> \
  -e SMTP_FROM=<bitwarden@domain.tld> \
  -e SMTP_PORT=587 \
  -e SMTP_SSL=true \
  -e SMTP_USERNAME=<username> \
@@ -348,7 +378,7 @@ docker build -t bitwarden_rs .

## Building binary

For building binary outside the Docker environment and running it locally without docker, please see [build instructions](BUILD.md).
For building binary outside the Docker environment and running it locally without docker, please see [build instructions](https://github.com/dani-garcia/bitwarden_rs/blob/master/BUILD.md).

## Available packages

@@ -411,7 +441,9 @@ We use upstream Vault interface directly without any (significant) changes, this

### Inviting users into organization

If you have [invitations disabled](#disable-invitations), the users must already be registered on your server to invite them. The invited users won't get the invitation email, instead they will appear in the interface as if they already accepted the invitation. (if the user has already registered) Organization admin then just needs to confirm them to be proper Organization members and to give them access to the shared secrets.
The invited users won't get the invitation email, instead all already registered users will appear in the interface as if they already accepted the invitation. Organization admin then just needs to confirm them to be proper Organization members and to give them access to the shared secrets.

Invited users, that aren't registered yet will show up in the Organization admin interface as "Invited". At the same time an invitation record is created that allows the users to register even if [user registration is disabled](#disable-registration-of-new-users). (unless you [disable this functionality](#disable-invitations)) They will automatically become "Accepted" once they register. From there Organization admin can confirm them to give them access to Organization.

### Running on unencrypted connection

migrations/2018-09-19-144557_add_kdf_columns/up.sql (new file, 7 lines)

@@ -0,0 +1,7 @@
ALTER TABLE users
    ADD COLUMN
    client_kdf_type INTEGER NOT NULL DEFAULT 0; -- PBKDF2

ALTER TABLE users
    ADD COLUMN
    client_kdf_iter INTEGER NOT NULL DEFAULT 5000;
@@ -1 +1 @@
nightly-2018-08-24
nightly-2018-09-12
@@ -14,6 +14,8 @@ use CONFIG;
#[allow(non_snake_case)]
struct RegisterData {
Email: String,
Kdf: Option<i32>,
KdfIterations: Option<i32>,
Key: String,
Keys: Option<KeysData>,
MasterPasswordHash: String,
@@ -41,12 +43,10 @@ fn register(data: JsonUpcase<RegisterData>, conn: DbConn) -> EmptyResult {
user_org.save(&conn);
};
user
} else if CONFIG.signups_allowed {
err!("Account with this email already exists")
} else {
if CONFIG.signups_allowed {
err!("Account with this email already exists")
} else {
err!("Registration not allowed")
}
err!("Registration not allowed")
}
},
None => {
@@ -58,6 +58,14 @@ fn register(data: JsonUpcase<RegisterData>, conn: DbConn) -> EmptyResult {
}
};

if let Some(client_kdf_iter) = data.KdfIterations {
user.client_kdf_iter = client_kdf_iter;
}

if let Some(client_kdf_type) = data.Kdf {
user.client_kdf_type = client_kdf_type;
}

user.set_password(&data.MasterPasswordHash);
user.key = data.Key;

@@ -167,6 +175,35 @@ fn post_password(data: JsonUpcase<ChangePassData>, headers: Headers, conn: DbCon
Ok(())
}

#[derive(Deserialize)]
#[allow(non_snake_case)]
struct ChangeKdfData {
Kdf: i32,
KdfIterations: i32,

MasterPasswordHash: String,
NewMasterPasswordHash: String,
Key: String,
}

#[post("/accounts/kdf", data = "<data>")]
fn post_kdf(data: JsonUpcase<ChangeKdfData>, headers: Headers, conn: DbConn) -> EmptyResult {
let data: ChangeKdfData = data.into_inner().data;
let mut user = headers.user;

if !user.check_valid_password(&data.MasterPasswordHash) {
err!("Invalid password")
}

user.client_kdf_iter = data.KdfIterations;
user.client_kdf_type = data.Kdf;
user.set_password(&data.NewMasterPasswordHash);
user.key = data.Key;
user.save(&conn);

Ok(())
}

#[post("/accounts/security-stamp", data = "<data>")]
fn post_sstamp(data: JsonUpcase<PasswordData>, headers: Headers, conn: DbConn) -> EmptyResult {
let data: PasswordData = data.into_inner().data;
@@ -240,6 +277,11 @@ fn post_email(data: JsonUpcase<ChangeEmailData>, headers: Headers, conn: DbConn)
}

#[post("/accounts/delete", data = "<data>")]
fn post_delete_account(data: JsonUpcase<PasswordData>, headers: Headers, conn: DbConn) -> EmptyResult {
delete_account(data, headers, conn)
}

#[delete("/accounts", data = "<data>")]
fn delete_account(data: JsonUpcase<PasswordData>, headers: Headers, conn: DbConn) -> EmptyResult {
let data: PasswordData = data.into_inner().data;
let user = headers.user;
@@ -305,7 +347,7 @@ fn password_hint(data: JsonUpcase<PasswordHintData>, conn: DbConn) -> EmptyResul
if let Some(hint) = user.password_hint {
err!(format!("Your password hint is: {}", &hint));
} else {
err!(format!("Sorry, you have no password hint..."));
err!("Sorry, you have no password hint...");
}
}

@@ -322,20 +364,13 @@ struct PreloginData {
fn prelogin(data: JsonUpcase<PreloginData>, conn: DbConn) -> JsonResult {
let data: PreloginData = data.into_inner().data;

match User::find_by_mail(&data.Email, &conn) {
Some(user) => {
let kdf_type = 0; // PBKDF2: 0
let (kdf_type, kdf_iter) = match User::find_by_mail(&data.Email, &conn) {
Some(user) => (user.client_kdf_type, user.client_kdf_iter),
None => (User::CLIENT_KDF_TYPE_DEFAULT, User::CLIENT_KDF_ITER_DEFAULT),
};

let _server_iter = user.password_iterations;
let client_iter = 5000; // TODO: Make iterations user configurable


Ok(Json(json!({
"Kdf": kdf_type,
"KdfIterations": client_iter
})))
},
None => err!("Invalid user"),
}
Ok(Json(json!({
"Kdf": kdf_type,
"KdfIterations": kdf_iter
})))
}
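To make the KDF flow above concrete, here is a small illustrative sketch of the JSON bodies involved. Field names come from the structs in this diff; the `/api/accounts/...` route prefix and the placeholder values are assumptions, not taken from the diff itself.

```rust
// Illustrative only: shows the shape of the prelogin and change-KDF payloads
// handled by `prelogin` and `post_kdf` above. Requires the `serde_json` crate.
use serde_json::json;

fn main() {
    // POST <assumed: /api/accounts/prelogin> request body
    let prelogin_request = json!({ "Email": "user@example.com" });

    // Response: the stored client_kdf_type / client_kdf_iter, or the defaults
    // for unknown emails (presumably PBKDF2 = 0 with 5000 iterations, matching
    // the migration defaults shown earlier).
    let prelogin_response = json!({ "Kdf": 0, "KdfIterations": 5000 });

    // POST <assumed: /api/accounts/kdf> request body (ChangeKdfData)
    let change_kdf = json!({
        "Kdf": 0,
        "KdfIterations": 100_000,                 // hypothetical new iteration count
        "MasterPasswordHash": "<current hash>",
        "NewMasterPasswordHash": "<hash derived with the new settings>",
        "Key": "<re-encrypted user key>"
    });

    println!("{prelogin_request}\n{prelogin_response}\n{change_kdf}");
}
```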
@@ -1,6 +1,7 @@
use std::path::Path;
use std::collections::HashSet;

use rocket::State;
use rocket::Data;
use rocket::http::ContentType;

@@ -16,7 +17,7 @@ use db::models::*;

use crypto;

use api::{self, PasswordData, JsonResult, EmptyResult, JsonUpcase};
use api::{self, PasswordData, JsonResult, EmptyResult, JsonUpcase, WebSocketUsers, UpdateType};
use auth::Headers;

use CONFIG;
@@ -117,22 +118,22 @@ pub struct CipherData {
}

#[post("/ciphers/admin", data = "<data>")]
fn post_ciphers_admin(data: JsonUpcase<CipherData>, headers: Headers, conn: DbConn) -> JsonResult {
fn post_ciphers_admin(data: JsonUpcase<CipherData>, headers: Headers, conn: DbConn, ws: State<WebSocketUsers>) -> JsonResult {
// TODO: Implement this correctly
post_ciphers(data, headers, conn)
post_ciphers(data, headers, conn, ws)
}

#[post("/ciphers", data = "<data>")]
fn post_ciphers(data: JsonUpcase<CipherData>, headers: Headers, conn: DbConn) -> JsonResult {
fn post_ciphers(data: JsonUpcase<CipherData>, headers: Headers, conn: DbConn, ws: State<WebSocketUsers>) -> JsonResult {
let data: CipherData = data.into_inner().data;

let mut cipher = Cipher::new(data.Type, data.Name.clone());
update_cipher_from_data(&mut cipher, data, &headers, false, &conn)?;
update_cipher_from_data(&mut cipher, data, &headers, false, &conn, &ws, UpdateType::SyncCipherCreate)?;

Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, &conn)))
}

pub fn update_cipher_from_data(cipher: &mut Cipher, data: CipherData, headers: &Headers, shared_to_collection: bool, conn: &DbConn) -> EmptyResult {
pub fn update_cipher_from_data(cipher: &mut Cipher, data: CipherData, headers: &Headers, shared_to_collection: bool, conn: &DbConn, ws: &State<WebSocketUsers>, ut: UpdateType) -> EmptyResult {
if let Some(org_id) = data.OrganizationId {
match UserOrganization::find_by_user_and_org(&headers.user.uuid, &org_id, &conn) {
None => err!("You don't have permission to add item to organization"),
@@ -190,6 +191,7 @@ pub fn update_cipher_from_data(cipher: &mut Cipher, data: CipherData, headers: &
cipher.password_history = data.PasswordHistory.map(|f| f.to_string());

cipher.save(&conn);
ws.send_cipher_update(ut, &cipher, &cipher.update_users_revision(&conn));

if cipher.move_to_folder(data.FolderId, &headers.user.uuid, &conn).is_err() {
err!("Error saving the folder information")
@@ -219,7 +221,7 @@ struct RelationsData {


#[post("/ciphers/import", data = "<data>")]
fn post_ciphers_import(data: JsonUpcase<ImportData>, headers: Headers, conn: DbConn) -> EmptyResult {
fn post_ciphers_import(data: JsonUpcase<ImportData>, headers: Headers, conn: DbConn, ws: State<WebSocketUsers>) -> EmptyResult {
let data: ImportData = data.into_inner().data;

// Read and create the folders
@@ -243,7 +245,7 @@ fn post_ciphers_import(data: JsonUpcase<ImportData>, headers: Headers, conn: DbC
.map(|i| folders[*i].uuid.clone());

let mut cipher = Cipher::new(cipher_data.Type, cipher_data.Name.clone());
update_cipher_from_data(&mut cipher, cipher_data, &headers, false, &conn)?;
update_cipher_from_data(&mut cipher, cipher_data, &headers, false, &conn, &ws, UpdateType::SyncCipherCreate)?;

cipher.move_to_folder(folder_uuid, &headers.user.uuid.clone(), &conn).ok();
}
@@ -257,22 +259,22 @@ fn post_ciphers_import(data: JsonUpcase<ImportData>, headers: Headers, conn: DbC


#[put("/ciphers/<uuid>/admin", data = "<data>")]
fn put_cipher_admin(uuid: String, data: JsonUpcase<CipherData>, headers: Headers, conn: DbConn) -> JsonResult {
put_cipher(uuid, data, headers, conn)
fn put_cipher_admin(uuid: String, data: JsonUpcase<CipherData>, headers: Headers, conn: DbConn, ws: State<WebSocketUsers>) -> JsonResult {
put_cipher(uuid, data, headers, conn, ws)
}

#[post("/ciphers/<uuid>/admin", data = "<data>")]
fn post_cipher_admin(uuid: String, data: JsonUpcase<CipherData>, headers: Headers, conn: DbConn) -> JsonResult {
post_cipher(uuid, data, headers, conn)
fn post_cipher_admin(uuid: String, data: JsonUpcase<CipherData>, headers: Headers, conn: DbConn, ws: State<WebSocketUsers>) -> JsonResult {
post_cipher(uuid, data, headers, conn, ws)
}

#[post("/ciphers/<uuid>", data = "<data>")]
fn post_cipher(uuid: String, data: JsonUpcase<CipherData>, headers: Headers, conn: DbConn) -> JsonResult {
put_cipher(uuid, data, headers, conn)
fn post_cipher(uuid: String, data: JsonUpcase<CipherData>, headers: Headers, conn: DbConn, ws: State<WebSocketUsers>) -> JsonResult {
put_cipher(uuid, data, headers, conn, ws)
}

#[put("/ciphers/<uuid>", data = "<data>")]
fn put_cipher(uuid: String, data: JsonUpcase<CipherData>, headers: Headers, conn: DbConn) -> JsonResult {
fn put_cipher(uuid: String, data: JsonUpcase<CipherData>, headers: Headers, conn: DbConn, ws: State<WebSocketUsers>) -> JsonResult {
let data: CipherData = data.into_inner().data;

let mut cipher = match Cipher::find_by_uuid(&uuid, &conn) {
@@ -284,7 +286,7 @@ fn put_cipher(uuid: String, data: JsonUpcase<CipherData>, headers: Headers, conn
err!("Cipher is not write accessible")
}

update_cipher_from_data(&mut cipher, data, &headers, false, &conn)?;
update_cipher_from_data(&mut cipher, data, &headers, false, &conn, &ws, UpdateType::SyncCipherUpdate)?;

Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, &conn)))
}
@@ -349,17 +351,17 @@ struct ShareCipherData {
}

#[post("/ciphers/<uuid>/share", data = "<data>")]
fn post_cipher_share(uuid: String, data: JsonUpcase<ShareCipherData>, headers: Headers, conn: DbConn) -> JsonResult {
fn post_cipher_share(uuid: String, data: JsonUpcase<ShareCipherData>, headers: Headers, conn: DbConn, ws: State<WebSocketUsers>) -> JsonResult {
let data: ShareCipherData = data.into_inner().data;

share_cipher_by_uuid(&uuid, data, &headers, &conn)
share_cipher_by_uuid(&uuid, data, &headers, &conn, &ws)
}

#[put("/ciphers/<uuid>/share", data = "<data>")]
fn put_cipher_share(uuid: String, data: JsonUpcase<ShareCipherData>, headers: Headers, conn: DbConn) -> JsonResult {
fn put_cipher_share(uuid: String, data: JsonUpcase<ShareCipherData>, headers: Headers, conn: DbConn, ws: State<WebSocketUsers>) -> JsonResult {
let data: ShareCipherData = data.into_inner().data;

share_cipher_by_uuid(&uuid, data, &headers, &conn)
share_cipher_by_uuid(&uuid, data, &headers, &conn, &ws)
}

#[derive(Deserialize)]
@@ -370,15 +372,15 @@ struct ShareSelectedCipherData {
}

#[put("/ciphers/share", data = "<data>")]
fn put_cipher_share_seleted(data: JsonUpcase<ShareSelectedCipherData>, headers: Headers, conn: DbConn) -> EmptyResult {
fn put_cipher_share_seleted(data: JsonUpcase<ShareSelectedCipherData>, headers: Headers, conn: DbConn, ws: State<WebSocketUsers>) -> EmptyResult {
let mut data: ShareSelectedCipherData = data.into_inner().data;
let mut cipher_ids: Vec<String> = Vec::new();

if data.Ciphers.len() == 0 {
if data.Ciphers.is_empty() {
err!("You must select at least one cipher.")
}

if data.CollectionIds.len() == 0 {
if data.CollectionIds.is_empty() {
err!("You must select at least one collection.")
}

@@ -391,7 +393,7 @@ fn put_cipher_share_seleted(data: JsonUpcase<ShareSelectedCipherData>, headers:

let attachments = Attachment::find_by_ciphers(cipher_ids, &conn);

if attachments.len() > 0 {
if !attachments.is_empty() {
err!("Ciphers should not have any attachments.")
}

@@ -402,15 +404,16 @@ fn put_cipher_share_seleted(data: JsonUpcase<ShareSelectedCipherData>, headers:
};

match shared_cipher_data.Cipher.Id.take() {
Some(id) => share_cipher_by_uuid(&id, shared_cipher_data , &headers, &conn)?,
Some(id) => share_cipher_by_uuid(&id, shared_cipher_data , &headers, &conn, &ws)?,
None => err!("Request missing ids field")

};
}

Ok(())
}

fn share_cipher_by_uuid(uuid: &str, data: ShareCipherData, headers: &Headers, conn: &DbConn) -> JsonResult {
fn share_cipher_by_uuid(uuid: &str, data: ShareCipherData, headers: &Headers, conn: &DbConn, ws: &State<WebSocketUsers>) -> JsonResult {
let mut cipher = match Cipher::find_by_uuid(&uuid, &conn) {
Some(cipher) => {
if cipher.is_write_accessible_to_user(&headers.user.uuid, &conn) {
@@ -443,7 +446,7 @@ fn share_cipher_by_uuid(uuid: &str, data: ShareCipherData, headers: &Headers, co
}
}
}
update_cipher_from_data(&mut cipher, data.Cipher, &headers, shared_to_collection, &conn)?;
update_cipher_from_data(&mut cipher, data.Cipher, &headers, shared_to_collection, &conn, &ws, UpdateType::SyncCipherUpdate)?;

Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, &conn)))
}
@@ -509,53 +512,53 @@ fn post_attachment_admin(uuid: String, data: Data, content_type: &ContentType, h
}

#[post("/ciphers/<uuid>/attachment/<attachment_id>/share", format = "multipart/form-data", data = "<data>")]
fn post_attachment_share(uuid: String, attachment_id: String, data: Data, content_type: &ContentType, headers: Headers, conn: DbConn) -> JsonResult {
_delete_cipher_attachment_by_id(&uuid, &attachment_id, &headers, &conn)?;
fn post_attachment_share(uuid: String, attachment_id: String, data: Data, content_type: &ContentType, headers: Headers, conn: DbConn, ws: State<WebSocketUsers>) -> JsonResult {
_delete_cipher_attachment_by_id(&uuid, &attachment_id, &headers, &conn, &ws)?;
post_attachment(uuid, data, content_type, headers, conn)
}

#[post("/ciphers/<uuid>/attachment/<attachment_id>/delete-admin")]
fn delete_attachment_post_admin(uuid: String, attachment_id: String, headers: Headers, conn: DbConn) -> EmptyResult {
delete_attachment(uuid, attachment_id, headers, conn)
fn delete_attachment_post_admin(uuid: String, attachment_id: String, headers: Headers, conn: DbConn, ws: State<WebSocketUsers>) -> EmptyResult {
delete_attachment(uuid, attachment_id, headers, conn, ws)
}

#[post("/ciphers/<uuid>/attachment/<attachment_id>/delete")]
fn delete_attachment_post(uuid: String, attachment_id: String, headers: Headers, conn: DbConn) -> EmptyResult {
delete_attachment(uuid, attachment_id, headers, conn)
fn delete_attachment_post(uuid: String, attachment_id: String, headers: Headers, conn: DbConn, ws: State<WebSocketUsers>) -> EmptyResult {
delete_attachment(uuid, attachment_id, headers, conn, ws)
}

#[delete("/ciphers/<uuid>/attachment/<attachment_id>")]
fn delete_attachment(uuid: String, attachment_id: String, headers: Headers, conn: DbConn) -> EmptyResult {
_delete_cipher_attachment_by_id(&uuid, &attachment_id, &headers, &conn)
fn delete_attachment(uuid: String, attachment_id: String, headers: Headers, conn: DbConn, ws: State<WebSocketUsers>) -> EmptyResult {
_delete_cipher_attachment_by_id(&uuid, &attachment_id, &headers, &conn, &ws)
}

#[delete("/ciphers/<uuid>/attachment/<attachment_id>/admin")]
fn delete_attachment_admin(uuid: String, attachment_id: String, headers: Headers, conn: DbConn) -> EmptyResult {
_delete_cipher_attachment_by_id(&uuid, &attachment_id, &headers, &conn)
fn delete_attachment_admin(uuid: String, attachment_id: String, headers: Headers, conn: DbConn, ws: State<WebSocketUsers>) -> EmptyResult {
_delete_cipher_attachment_by_id(&uuid, &attachment_id, &headers, &conn, &ws)
}

#[post("/ciphers/<uuid>/delete")]
fn delete_cipher_post(uuid: String, headers: Headers, conn: DbConn) -> EmptyResult {
_delete_cipher_by_uuid(&uuid, &headers, &conn)
fn delete_cipher_post(uuid: String, headers: Headers, conn: DbConn, ws: State<WebSocketUsers>) -> EmptyResult {
_delete_cipher_by_uuid(&uuid, &headers, &conn, &ws)
}

#[post("/ciphers/<uuid>/delete-admin")]
fn delete_cipher_post_admin(uuid: String, headers: Headers, conn: DbConn) -> EmptyResult {
_delete_cipher_by_uuid(&uuid, &headers, &conn)
fn delete_cipher_post_admin(uuid: String, headers: Headers, conn: DbConn, ws: State<WebSocketUsers>) -> EmptyResult {
_delete_cipher_by_uuid(&uuid, &headers, &conn, &ws)
}

#[delete("/ciphers/<uuid>")]
fn delete_cipher(uuid: String, headers: Headers, conn: DbConn) -> EmptyResult {
_delete_cipher_by_uuid(&uuid, &headers, &conn)
fn delete_cipher(uuid: String, headers: Headers, conn: DbConn, ws: State<WebSocketUsers>) -> EmptyResult {
_delete_cipher_by_uuid(&uuid, &headers, &conn, &ws)
}

#[delete("/ciphers/<uuid>/admin")]
fn delete_cipher_admin(uuid: String, headers: Headers, conn: DbConn) -> EmptyResult {
_delete_cipher_by_uuid(&uuid, &headers, &conn)
fn delete_cipher_admin(uuid: String, headers: Headers, conn: DbConn, ws: State<WebSocketUsers>) -> EmptyResult {
_delete_cipher_by_uuid(&uuid, &headers, &conn, &ws)
}

#[delete("/ciphers", data = "<data>")]
fn delete_cipher_selected(data: JsonUpcase<Value>, headers: Headers, conn: DbConn) -> EmptyResult {
fn delete_cipher_selected(data: JsonUpcase<Value>, headers: Headers, conn: DbConn, ws: State<WebSocketUsers>) -> EmptyResult {
let data: Value = data.into_inner().data;

let uuids = match data.get("Ids") {
@@ -567,7 +570,7 @@ fn delete_cipher_selected(data: JsonUpcase<Value>, headers: Headers, conn: DbCon
};

for uuid in uuids {
if let error @ Err(_) = _delete_cipher_by_uuid(uuid, &headers, &conn) {
if let error @ Err(_) = _delete_cipher_by_uuid(uuid, &headers, &conn, &ws) {
return error;
};
}
@@ -576,12 +579,12 @@ fn delete_cipher_selected(data: JsonUpcase<Value>, headers: Headers, conn: DbCon
}

#[post("/ciphers/delete", data = "<data>")]
fn delete_cipher_selected_post(data: JsonUpcase<Value>, headers: Headers, conn: DbConn) -> EmptyResult {
delete_cipher_selected(data, headers, conn)
fn delete_cipher_selected_post(data: JsonUpcase<Value>, headers: Headers, conn: DbConn, ws: State<WebSocketUsers>) -> EmptyResult {
delete_cipher_selected(data, headers, conn, ws)
}

#[post("/ciphers/move", data = "<data>")]
fn move_cipher_selected(data: JsonUpcase<Value>, headers: Headers, conn: DbConn) -> EmptyResult {
fn move_cipher_selected(data: JsonUpcase<Value>, headers: Headers, conn: DbConn, ws: State<WebSocketUsers>) -> EmptyResult {
let data = data.into_inner().data;

let folder_id = match data.get("FolderId") {
@@ -627,18 +630,19 @@ fn move_cipher_selected(data: JsonUpcase<Value>, headers: Headers, conn: DbConn)
err!("Error saving the folder information")
}
cipher.save(&conn);
ws.send_cipher_update(UpdateType::SyncCipherUpdate, &cipher, &cipher.update_users_revision(&conn));
}

Ok(())
}

#[put("/ciphers/move", data = "<data>")]
fn move_cipher_selected_put(data: JsonUpcase<Value>, headers: Headers, conn: DbConn) -> EmptyResult {
move_cipher_selected(data, headers, conn)
fn move_cipher_selected_put(data: JsonUpcase<Value>, headers: Headers, conn: DbConn, ws: State<WebSocketUsers>) -> EmptyResult {
move_cipher_selected(data, headers, conn, ws)
}

#[post("/ciphers/purge", data = "<data>")]
fn delete_all(data: JsonUpcase<PasswordData>, headers: Headers, conn: DbConn) -> EmptyResult {
fn delete_all(data: JsonUpcase<PasswordData>, headers: Headers, conn: DbConn, ws: State<WebSocketUsers>) -> EmptyResult {
let data: PasswordData = data.into_inner().data;
let password_hash = data.MasterPasswordHash;

@@ -653,6 +657,9 @@ fn delete_all(data: JsonUpcase<PasswordData>, headers: Headers, conn: DbConn) ->
if cipher.delete(&conn).is_err() {
err!("Failed deleting cipher")
}
else {
ws.send_cipher_update(UpdateType::SyncCipherDelete, &cipher, &cipher.update_users_revision(&conn));
}
}

// Delete folders
@@ -660,13 +667,16 @@ fn delete_all(data: JsonUpcase<PasswordData>, headers: Headers, conn: DbConn) ->
if f.delete(&conn).is_err() {
err!("Failed deleting folder")
}
else {
ws.send_folder_update(UpdateType::SyncFolderCreate, &f);
}
}

Ok(())
}

fn _delete_cipher_by_uuid(uuid: &str, headers: &Headers, conn: &DbConn) -> EmptyResult {
let cipher = match Cipher::find_by_uuid(uuid, conn) {
fn _delete_cipher_by_uuid(uuid: &str, headers: &Headers, conn: &DbConn, ws: &State<WebSocketUsers>) -> EmptyResult {
let cipher = match Cipher::find_by_uuid(&uuid, &conn) {
Some(cipher) => cipher,
None => err!("Cipher doesn't exist"),
};
@@ -675,13 +685,16 @@ fn _delete_cipher_by_uuid(uuid: &str, headers: &Headers, conn: &DbConn) -> Empty
err!("Cipher can't be deleted by user")
}

match cipher.delete(conn) {
Ok(()) => Ok(()),
match cipher.delete(&conn) {
Ok(()) => {
ws.send_cipher_update(UpdateType::SyncCipherDelete, &cipher, &cipher.update_users_revision(&conn));
Ok(())
}
Err(_) => err!("Failed deleting cipher")
}
}

fn _delete_cipher_attachment_by_id(uuid: &str, attachment_id: &str, headers: &Headers, conn: &DbConn) -> EmptyResult {
fn _delete_cipher_attachment_by_id(uuid: &str, attachment_id: &str, headers: &Headers, conn: &DbConn, ws: &State<WebSocketUsers>) -> EmptyResult {
let attachment = match Attachment::find_by_id(&attachment_id, &conn) {
Some(attachment) => attachment,
None => err!("Attachment doesn't exist")
@@ -702,7 +715,10 @@ fn _delete_cipher_attachment_by_id(uuid: &str, attachment_id: &str, headers: &He

// Delete attachment
match attachment.delete(&conn) {
Ok(()) => Ok(()),
Ok(()) => {
ws.send_cipher_update(UpdateType::SyncCipherDelete, &cipher, &cipher.update_users_revision(&conn));
Ok(())
}
Err(_) => err!("Deleting attachement failed")
}
}
@@ -1,9 +1,10 @@
use rocket::State;
use rocket_contrib::{Json, Value};

use db::DbConn;
use db::models::*;

use api::{JsonResult, EmptyResult, JsonUpcase};
use api::{JsonResult, EmptyResult, JsonUpcase, WebSocketUsers, UpdateType};
use auth::Headers;

#[get("/folders")]
@@ -40,23 +41,24 @@ pub struct FolderData {
}

#[post("/folders", data = "<data>")]
fn post_folders(data: JsonUpcase<FolderData>, headers: Headers, conn: DbConn) -> JsonResult {
fn post_folders(data: JsonUpcase<FolderData>, headers: Headers, conn: DbConn, ws: State<WebSocketUsers>) -> JsonResult {
let data: FolderData = data.into_inner().data;

let mut folder = Folder::new(headers.user.uuid.clone(), data.Name);

folder.save(&conn);
ws.send_folder_update(UpdateType::SyncFolderCreate, &folder);

Ok(Json(folder.to_json()))
}

#[post("/folders/<uuid>", data = "<data>")]
fn post_folder(uuid: String, data: JsonUpcase<FolderData>, headers: Headers, conn: DbConn) -> JsonResult {
put_folder(uuid, data, headers, conn)
fn post_folder(uuid: String, data: JsonUpcase<FolderData>, headers: Headers, conn: DbConn, ws: State<WebSocketUsers>) -> JsonResult {
put_folder(uuid, data, headers, conn, ws)
}

#[put("/folders/<uuid>", data = "<data>")]
fn put_folder(uuid: String, data: JsonUpcase<FolderData>, headers: Headers, conn: DbConn) -> JsonResult {
fn put_folder(uuid: String, data: JsonUpcase<FolderData>, headers: Headers, conn: DbConn, ws: State<WebSocketUsers>) -> JsonResult {
let data: FolderData = data.into_inner().data;

let mut folder = match Folder::find_by_uuid(&uuid, &conn) {
@@ -71,17 +73,18 @@ fn put_folder(uuid: String, data: JsonUpcase<FolderData>, headers: Headers, conn
folder.name = data.Name;

folder.save(&conn);
ws.send_folder_update(UpdateType::SyncFolderUpdate, &folder);

Ok(Json(folder.to_json()))
}

#[post("/folders/<uuid>/delete")]
fn delete_folder_post(uuid: String, headers: Headers, conn: DbConn) -> EmptyResult {
delete_folder(uuid, headers, conn)
fn delete_folder_post(uuid: String, headers: Headers, conn: DbConn, ws: State<WebSocketUsers>) -> EmptyResult {
delete_folder(uuid, headers, conn, ws)
}

#[delete("/folders/<uuid>")]
fn delete_folder(uuid: String, headers: Headers, conn: DbConn) -> EmptyResult {
fn delete_folder(uuid: String, headers: Headers, conn: DbConn, ws: State<WebSocketUsers>) -> EmptyResult {
let folder = match Folder::find_by_uuid(&uuid, &conn) {
Some(folder) => folder,
_ => err!("Invalid folder")
@@ -93,7 +96,10 @@ fn delete_folder(uuid: String, headers: Headers, conn: DbConn) -> EmptyResult {

// Delete the actual folder entry
match folder.delete(&conn) {
Ok(()) => Ok(()),
Ok(()) => {
ws.send_folder_update(UpdateType::SyncFolderDelete, &folder);
Ok(())
}
Err(_) => err!("Failed deleting folder")
}
}
@@ -19,10 +19,12 @@ pub fn routes() -> Vec<Route> {
get_public_keys,
post_keys,
post_password,
post_kdf,
post_sstamp,
post_email_token,
post_email,
delete_account,
post_delete_account,
revision_date,
password_hint,
prelogin,
@@ -1,9 +1,10 @@
use rocket::State;
use rocket_contrib::{Json, Value};
use CONFIG;
use db::DbConn;
use db::models::*;

use api::{PasswordData, JsonResult, EmptyResult, NumberOrString, JsonUpcase};
use api::{PasswordData, JsonResult, EmptyResult, NumberOrString, JsonUpcase, WebSocketUsers, UpdateType};
use auth::{Headers, AdminHeaders, OwnerHeaders};

use serde::{Deserialize, Deserializer};
@@ -288,7 +289,7 @@ fn get_collection_users(org_id: String, coll_id: String, _headers: AdminHeaders,
.iter().map(|col_user| {
UserOrganization::find_by_user_and_org(&col_user.user_uuid, &org_id, &conn)
.unwrap()
.to_json_collection_user_details(&col_user.read_only, &conn)
.to_json_collection_user_details(col_user.read_only, &conn)
}).collect();

Ok(Json(json!({
@@ -601,7 +602,7 @@ struct RelationsData {
}

#[post("/ciphers/import-organization?<query>", data = "<data>")]
fn post_org_import(query: OrgIdData, data: JsonUpcase<ImportData>, headers: Headers, conn: DbConn) -> EmptyResult {
fn post_org_import(query: OrgIdData, data: JsonUpcase<ImportData>, headers: Headers, conn: DbConn, ws: State<WebSocketUsers>) -> EmptyResult {
let data: ImportData = data.into_inner().data;
let org_id = query.organizationId;

@@ -630,7 +631,7 @@ fn post_org_import(query: OrgIdData, data: JsonUpcase<ImportData>, headers: Head
// Read and create the ciphers
let ciphers: Vec<_> = data.Ciphers.into_iter().map(|cipher_data| {
let mut cipher = Cipher::new(cipher_data.Type, cipher_data.Name.clone());
update_cipher_from_data(&mut cipher, cipher_data, &headers, false, &conn).ok();
update_cipher_from_data(&mut cipher, cipher_data, &headers, false, &conn, &ws, UpdateType::SyncCipherCreate).ok();
cipher
}).collect();

@@ -293,7 +293,7 @@ impl RegisterResponseCopy {
RegisterResponse {
registration_data: self.registration_data,
version: self.version,
challenge: challenge,
challenge,
client_data: self.client_data,
}
}
@@ -18,7 +18,7 @@ fn icon(domain: String) -> Content<Vec<u8>> {
let icon_type = ContentType::new("image", "x-icon");

// Validate the domain to avoid directory traversal attacks
if domain.contains("/") || domain.contains("..") {
if domain.contains('/') || domain.contains("..") {
return Content(icon_type, get_fallback_icon());
}

@@ -158,11 +158,11 @@ fn twofactor_auth(
let providers: Vec<_> = twofactors.iter().map(|tf| tf.type_).collect();

// No twofactor token if twofactor is disabled
if twofactors.len() == 0 {
if twofactors.is_empty() {
return Ok(None);
}

let provider = match util::parse_option_string(data.get_opt("twoFactorProvider")) {
let provider = match util::try_parse_string(data.get_opt("twoFactorProvider")) {
Some(provider) => provider,
None => providers[0], // If we aren't given a two factor provider, asume the first one
};
@@ -207,7 +207,7 @@ fn twofactor_auth(
_ => err!("Invalid two factor provider"),
}

if util::parse_option_string(data.get_opt("twoFactorRemember")).unwrap_or(0) == 1 {
if util::try_parse_string_or(data.get_opt("twoFactorRemember"), 0) == 1 {
Ok(Some(device.refresh_twofactor_remember()))
} else {
device.delete_twofactor_remember();
@@ -274,7 +274,7 @@ impl<'a, 'r> FromRequest<'a, 'r> for DeviceType {
fn from_request(request: &'a Request<'r>) -> request::Outcome<Self, Self::Error> {
let headers = request.headers();
let type_opt = headers.get_one("Device-Type");
let type_num = util::parse_option_string(type_opt).unwrap_or(0);
let type_num = util::try_parse_string_or(type_opt, 0);

Outcome::Success(DeviceType(type_num))
}
@@ -9,6 +9,7 @@ pub use self::icons::routes as icons_routes;
pub use self::identity::routes as identity_routes;
pub use self::web::routes as web_routes;
pub use self::notifications::routes as notifications_routes;
pub use self::notifications::{start_notification_server, WebSocketUsers, UpdateType};

use rocket::response::status::BadRequest;
use rocket_contrib::Json;
@@ -1,20 +1,26 @@
use rocket::Route;
use rocket_contrib::Json;

use db::DbConn;
use api::JsonResult;
use auth::Headers;
use db::DbConn;

use CONFIG;

pub fn routes() -> Vec<Route> {
routes![negotiate]
routes![negotiate, websockets_err]
}

#[get("/hub")]
fn websockets_err() -> JsonResult {
err!("'/notifications/hub' should be proxied towards the websocket server, otherwise notifications will not work. Go to the README for more info.")
}

#[post("/hub/negotiate")]
fn negotiate(_headers: Headers, _conn: DbConn) -> JsonResult {
use data_encoding::BASE64URL;
use crypto;
use data_encoding::BASE64URL;

// Store this in db?
let conn_id = BASE64URL.encode(&crypto::get_random(vec![0u8; 16]));

// TODO: Implement transports
@@ -23,9 +29,338 @@ fn negotiate(_headers: Headers, _conn: DbConn) -> JsonResult {
Ok(Json(json!({
"connectionId": conn_id,
"availableTransports":[
// {"transport":"WebSockets", "transferFormats":["Text","Binary"]},
{"transport":"WebSockets", "transferFormats":["Text","Binary"]},
// {"transport":"ServerSentEvents", "transferFormats":["Text"]},
// {"transport":"LongPolling", "transferFormats":["Text","Binary"]}
]
})))
}
}

///
/// Websockets server
///
use std::sync::Arc;
use std::thread;

use ws::{self, util::Token, Factory, Handler, Handshake, Message, Sender, WebSocket};

use chashmap::CHashMap;
use chrono::NaiveDateTime;
use serde_json::from_str;

use db::models::{Cipher, Folder, User};

use rmpv::Value;

fn serialize(val: Value) -> Vec<u8> {
    use rmpv::encode::write_value;

    let mut buf = Vec::new();
    write_value(&mut buf, &val).expect("Error encoding MsgPack");

    // Add size bytes at the start
    // Extracted from BinaryMessageFormat.js
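    // The prefix is a varint: 7 bits per byte, least-significant group first, with the
    // high bit set on every byte except the last. Worked example (for illustration only):
    // a 300-byte payload gets the prefix [0xAC, 0x02], since 300 = 0b1_0010_1100,
    // low 7 bits are 0x2C (OR 0x80 => 0xAC), then 300 >> 7 = 2.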
    let mut size: usize = buf.len();
    let mut len_buf: Vec<u8> = Vec::new();

    loop {
        let mut size_part = size & 0x7f;
        size >>= 7;

        if size > 0 {
            size_part |= 0x80;
        }

        len_buf.push(size_part as u8);

        if size == 0 {
            break;
        }
    }

    len_buf.append(&mut buf);
    len_buf
}

fn serialize_date(date: NaiveDateTime) -> Value {
    let seconds: i64 = date.timestamp();
    let nanos: i64 = date.timestamp_subsec_nanos() as i64;
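    // msgpack "timestamp 64" layout: 30-bit nanoseconds in the upper bits and
    // 34-bit seconds in the lower bits, packed big-endian below.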
    let timestamp = nanos << 34 | seconds;

    use byteorder::{BigEndian, WriteBytesExt};

    let mut bs = [0u8; 8];
    bs.as_mut()
        .write_i64::<BigEndian>(timestamp)
        .expect("Unable to write");

    // -1 is Timestamp
    // https://github.com/msgpack/msgpack/blob/master/spec.md#timestamp-extension-type
    Value::Ext(-1, bs.to_vec())
}

fn convert_option<T: Into<Value>>(option: Option<T>) -> Value {
    match option {
        Some(a) => a.into(),
        None => Value::Nil,
    }
}

// Server WebSocket handler
pub struct WSHandler {
    out: Sender,
    user_uuid: Option<String>,
    users: WebSocketUsers,
}

const RECORD_SEPARATOR: u8 = 0x1e;
const INITIAL_RESPONSE: [u8; 3] = [0x7b, 0x7d, RECORD_SEPARATOR]; // {, }, <RS>

#[derive(Deserialize)]
struct InitialMessage {
    protocol: String,
    version: i32,
}

const PING_MS: u64 = 15_000;
const PING: Token = Token(1);

impl Handler for WSHandler {
    fn on_open(&mut self, hs: Handshake) -> ws::Result<()> {
        // TODO: Improve this split
        let path = hs.request.resource();
        let mut query_split: Vec<_> = path.split('?').nth(1).unwrap().split('&').collect();
        query_split.sort();
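        // The resource is expected to look like "/hub?access_token=<jwt>&id=<device id>"
        // (assumed shape); after sorting, "access_token=..." comes first, and the slices
        // below drop the "access_token=" (13 chars) and "id=" (3 chars) prefixes.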
        let access_token = &query_split[0][13..];
        let _id = &query_split[1][3..];

        // Validate the user
        use auth;
        let claims = match auth::decode_jwt(access_token) {
            Ok(claims) => claims,
            Err(_) => {
                return Err(ws::Error::new(
                    ws::ErrorKind::Internal,
                    "Invalid access token provided",
                ))
            }
        };

        // Assign the user to the handler
        let user_uuid = claims.sub;
        self.user_uuid = Some(user_uuid.clone());

        // Add the current Sender to the user list
        let handler_insert = self.out.clone();
        let handler_update = self.out.clone();

        self.users.map.upsert(
            user_uuid,
            || vec![handler_insert],
            |ref mut v| v.push(handler_update),
        );

        // Schedule a ping to keep the connection alive
        self.out.timeout(PING_MS, PING)
    }

    fn on_message(&mut self, msg: Message) -> ws::Result<()> {
        println!("Server got message '{}'. ", msg);

        if let Message::Text(text) = msg.clone() {
            let json = &text[..text.len() - 1]; // Remove last char

            if let Ok(InitialMessage { protocol, version }) = from_str::<InitialMessage>(json) {
                if &protocol == "messagepack" && version == 1 {
                    return self.out.send(&INITIAL_RESPONSE[..]); // Respond to initial message
                }
            }
        }

        // If it's not the initial message, just echo the message
        self.out.send(msg)
    }

    fn on_timeout(&mut self, event: Token) -> ws::Result<()> {
        if event == PING {
            // send ping
            self.out.send(create_ping())?;

            // reschedule the timeout
            self.out.timeout(PING_MS, PING)
        } else {
            Err(ws::Error::new(
                ws::ErrorKind::Internal,
                "Invalid timeout token provided",
            ))
        }
    }
}

struct WSFactory {
    pub users: WebSocketUsers,
}

impl WSFactory {
    pub fn init() -> Self {
        WSFactory {
            users: WebSocketUsers {
                map: Arc::new(CHashMap::new()),
            },
        }
    }
}

impl Factory for WSFactory {
    type Handler = WSHandler;

    fn connection_made(&mut self, out: Sender) -> Self::Handler {
        println!("WS: Connection made");
        WSHandler {
            out,
            user_uuid: None,
            users: self.users.clone(),
        }
    }

    fn connection_lost(&mut self, handler: Self::Handler) {
        println!("WS: Connection lost");

        // Remove handler
        let user_uuid = &handler.user_uuid.unwrap();
        if let Some(mut user_conn) = self.users.map.get_mut(user_uuid) {
            user_conn.remove_item(&handler.out);
        }
    }
}

#[derive(Clone)]
pub struct WebSocketUsers {
    pub map: Arc<CHashMap<String, Vec<Sender>>>,
}

impl WebSocketUsers {
    fn send_update(&self, user_uuid: &String, data: Vec<u8>) -> ws::Result<()> {
        if let Some(user) = self.map.get(user_uuid) {
            for sender in user.iter() {
                sender.send(data.clone())?;
            }
        }
        Ok(())
    }

    // NOTE: The last modified date needs to be updated before calling these methods
    pub fn send_user_update(&self, ut: UpdateType, user: &User) {
        let data = create_update(
            vec![
                ("UserId".into(), user.uuid.clone().into()),
                ("Date".into(), serialize_date(user.updated_at)),
            ],
            ut,
        );

        self.send_update(&user.uuid.clone(), data).ok();
    }

    pub fn send_folder_update(&self, ut: UpdateType, folder: &Folder) {
        let data = create_update(
            vec![
                ("Id".into(), folder.uuid.clone().into()),
                ("UserId".into(), folder.user_uuid.clone().into()),
                ("RevisionDate".into(), serialize_date(folder.updated_at)),
            ],
            ut,
        );

        self.send_update(&folder.user_uuid, data).ok();
    }

    pub fn send_cipher_update(&self, ut: UpdateType, cipher: &Cipher, user_uuids: &Vec<String>) {
        let user_uuid = convert_option(cipher.user_uuid.clone());
        let org_uuid = convert_option(cipher.organization_uuid.clone());

        let data = create_update(
            vec![
                ("Id".into(), cipher.uuid.clone().into()),
                ("UserId".into(), user_uuid),
                ("OrganizationId".into(), org_uuid),
                ("CollectionIds".into(), Value::Nil),
                ("RevisionDate".into(), serialize_date(cipher.updated_at)),
            ],
            ut,
        );

        for uuid in user_uuids {
            self.send_update(&uuid, data.clone()).ok();
        }
    }
}

/* Message Structure
[
    1, // MessageType.Invocation
    {}, // Headers
    null, // InvocationId
    "ReceiveMessage", // Target
    [ // Arguments
        {
            "ContextId": "app_id",
            "Type": ut as i32,
            "Payload": {}
        }
    ]
]
*/
fn create_update(payload: Vec<(Value, Value)>, ut: UpdateType) -> Vec<u8> {
    use rmpv::Value as V;

    let value = V::Array(vec![
        1.into(),
        V::Array(vec![]),
        V::Nil,
        "ReceiveMessage".into(),
        V::Array(vec![V::Map(vec![
            ("ContextId".into(), "app_id".into()),
            ("Type".into(), (ut as i32).into()),
            ("Payload".into(), payload.into()),
        ])]),
    ]);

    serialize(value)
}

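// A bare [6] array corresponds to the Ping message type in the SignalR hub protocol
// (an assumption based on the protocol spec, not stated in this diff).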
fn create_ping() -> Vec<u8> {
    serialize(Value::Array(vec![6.into()]))
}

#[allow(dead_code)]
pub enum UpdateType {
    SyncCipherUpdate = 0,
    SyncCipherCreate = 1,
    SyncLoginDelete = 2,
    SyncFolderDelete = 3,
    SyncCiphers = 4,

    SyncVault = 5,
    SyncOrgKeys = 6,
    SyncFolderCreate = 7,
    SyncFolderUpdate = 8,
    SyncCipherDelete = 9,
    SyncSettings = 10,

    LogOut = 11,
}

pub fn start_notification_server() -> WebSocketUsers {
    let factory = WSFactory::init();
    let users = factory.users.clone();

    thread::spawn(move || {
        WebSocket::new(factory)
            .unwrap()
            .listen(format!("0.0.0.0:{}", CONFIG.websocket_port))
            .unwrap();
    });

    users
}
@@ -78,7 +78,7 @@ impl Attachment {
println!("ERROR: Failed with 10 retries");
return Err(err)
} else {
retries = retries - 1;
retries -= 1;
println!("Had to retry! Retries left: {}", retries);
thread::sleep(time::Duration::from_millis(500));
continue
@@ -130,19 +130,25 @@ impl Cipher {
        json_object
    }

    pub fn update_users_revision(&self, conn: &DbConn) {
    pub fn update_users_revision(&self, conn: &DbConn) -> Vec<String> {
        let mut user_uuids = Vec::new();
        match self.user_uuid {
            Some(ref user_uuid) => User::update_uuid_revision(&user_uuid, conn),
            Some(ref user_uuid) => {
                User::update_uuid_revision(&user_uuid, conn);
                user_uuids.push(user_uuid.clone())
            },
            None => { // Belongs to Organization, need to update affected users
                if let Some(ref org_uuid) = self.organization_uuid {
                    UserOrganization::find_by_cipher_and_org(&self.uuid, &org_uuid, conn)
                        .iter()
                        .for_each(|user_org| {
                            User::update_uuid_revision(&user_org.user_uuid, conn)
                            User::update_uuid_revision(&user_org.user_uuid, conn);
                            user_uuids.push(user_org.user_uuid.clone())
                        });
                }
            }
        };
        user_uuids
    }

    pub fn save(&mut self, conn: &DbConn) -> bool {
@@ -157,7 +163,7 @@ impl Cipher {
        }
    }

    pub fn delete(self, conn: &DbConn) -> QueryResult<()> {
    pub fn delete(&self, conn: &DbConn) -> QueryResult<()> {
        self.update_users_revision(conn);

        FolderCipher::delete_all_by_cipher(&self.uuid, &conn)?;
@@ -166,7 +172,7 @@ impl Cipher {

        diesel::delete(
            ciphers::table.filter(
                ciphers::uuid.eq(self.uuid)
                ciphers::uuid.eq(&self.uuid)
            )
        ).execute(&**conn).and(Ok(()))
    }
@@ -356,6 +362,6 @@ impl Cipher {
            )
        ))
        .select(ciphers_collections::collection_uuid)
        .load::<String>(&**conn).unwrap_or(vec![])
        .load::<String>(&**conn).unwrap_or_default()
    }
}
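A hedged aside: update_users_revision() now returns the uuids of every user it touched, so a caller can reuse that list to fan a WebSocket notification out to everyone affected. The sketch below uses minimal stand-in types; the real handlers and notifier live elsewhere in the codebase.

// Sketch: fan a change out to every affected user returned by the model layer.
struct Notifier;

impl Notifier {
    fn send_update(&self, user_uuid: &str) {
        println!("notify {}", user_uuid);
    }
}

fn after_cipher_change(affected_users: Vec<String>, notifier: &Notifier) {
    for uuid in &affected_users {
        notifier.send_update(uuid);
    }
}

fn main() {
    // Stand-in for the Vec<String> returned by Cipher::update_users_revision.
    let users = vec!["user-a".to_string(), "user-b".to_string()];
    after_cipher_change(users, &Notifier);
}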
@@ -82,13 +82,13 @@ impl Folder {
        }
    }

    pub fn delete(self, conn: &DbConn) -> QueryResult<()> {
    pub fn delete(&self, conn: &DbConn) -> QueryResult<()> {
        User::update_uuid_revision(&self.user_uuid, conn);
        FolderCipher::delete_all_by_folder(&self.uuid, &conn)?;

        diesel::delete(
            folders::table.filter(
                folders::uuid.eq(self.uuid)
                folders::uuid.eq(&self.uuid)
            )
        ).execute(&**conn).and(Ok(()))
    }
@@ -194,7 +194,7 @@ impl UserOrganization {
        })
    }

    pub fn to_json_collection_user_details(&self, read_only: &bool, conn: &DbConn) -> JsonValue {
    pub fn to_json_collection_user_details(&self, read_only: bool, conn: &DbConn) -> JsonValue {
        let user = User::find_by_uuid(&self.user_uuid, conn).unwrap();

        json!({
@@ -281,14 +281,14 @@ impl UserOrganization {
        users_organizations::table
            .filter(users_organizations::user_uuid.eq(user_uuid))
            .filter(users_organizations::status.eq(UserOrgStatus::Confirmed as i32))
            .load::<Self>(&**conn).unwrap_or(vec![])
            .load::<Self>(&**conn).unwrap_or_default()
    }

    pub fn find_invited_by_user(user_uuid: &str, conn: &DbConn) -> Vec<Self> {
        users_organizations::table
            .filter(users_organizations::user_uuid.eq(user_uuid))
            .filter(users_organizations::status.eq(UserOrgStatus::Invited as i32))
            .load::<Self>(&**conn).unwrap_or(vec![])
            .load::<Self>(&**conn).unwrap_or_default()
    }

    pub fn find_by_org(org_uuid: &str, conn: &DbConn) -> Vec<Self> {
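A hedged aside on the repeated unwrap_or(vec![]) -> unwrap_or_default() change: unwrap_or evaluates its fallback argument eagerly even when the Result is Ok, while unwrap_or_default only builds the fallback on the error path. For an empty Vec the cost is negligible either way, so the change is mainly idiomatic; a tiny self-contained illustration:

// Sketch: both forms yield the same values; unwrap_or_default simply defers
// constructing the fallback to the error path.
fn main() {
    let loaded: Result<Vec<String>, ()> = Ok(vec!["row".to_string()]);
    let failed: Result<Vec<String>, ()> = Err(());

    assert_eq!(loaded.unwrap_or_default(), vec!["row".to_string()]);
    assert!(failed.unwrap_or_default().is_empty());
    println!("unwrap_or_default behaves as expected");
}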
|
@@ -35,17 +35,20 @@ pub struct User {

    pub equivalent_domains: String,
    pub excluded_globals: String,

    pub client_kdf_type: i32,
    pub client_kdf_iter: i32,
}

/// Local methods
impl User {
    pub const CLIENT_KDF_TYPE_DEFAULT: i32 = 0; // PBKDF2: 0
    pub const CLIENT_KDF_ITER_DEFAULT: i32 = 5_000;

    pub fn new(mail: String) -> Self {
        let now = Utc::now().naive_utc();
        let email = mail.to_lowercase();

        let iterations = CONFIG.password_iterations;
        let salt = crypto::get_random_64();

        Self {
            uuid: Uuid::new_v4().to_string(),
            created_at: now,
@@ -55,8 +58,8 @@ impl User {
            key: String::new(),

            password_hash: Vec::new(),
            salt,
            password_iterations: iterations,
            salt: crypto::get_random_64(),
            password_iterations: CONFIG.password_iterations,

            security_stamp: Uuid::new_v4().to_string(),

@@ -69,6 +72,9 @@ impl User {

            equivalent_domains: "[]".to_string(),
            excluded_globals: "[]".to_string(),

            client_kdf_type: Self::CLIENT_KDF_TYPE_DEFAULT,
            client_kdf_iter: Self::CLIENT_KDF_ITER_DEFAULT,
        }
    }

@@ -113,7 +119,7 @@ impl User {
        let orgs = UserOrganization::find_by_user(&self.uuid, conn);
        let orgs_json: Vec<JsonValue> = orgs.iter().map(|c| c.to_json(&conn)).collect();

        let twofactor_enabled = TwoFactor::find_by_user(&self.uuid, conn).len() > 0;
        let twofactor_enabled = !TwoFactor::find_by_user(&self.uuid, conn).is_empty();

        json!({
            "Id": self.uuid,
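A hedged aside: the new client_kdf_type / client_kdf_iter columns describe the key-derivation settings a client should use before logging in. A prelogin-style response would surface them roughly as below; the field names ("Kdf", "KdfIterations") follow the upstream Bitwarden API and are an assumption here, not part of this diff.

// Sketch: serialize the stored KDF parameters for a client, using the
// defaults introduced above (PBKDF2, 5_000 iterations).
use serde_json::json;

fn main() {
    let client_kdf_type: i32 = 0;     // CLIENT_KDF_TYPE_DEFAULT (PBKDF2)
    let client_kdf_iter: i32 = 5_000; // CLIENT_KDF_ITER_DEFAULT

    let prelogin = json!({
        "Kdf": client_kdf_type,
        "KdfIterations": client_kdf_iter,
    });

    println!("{}", prelogin);
}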
|
@@ -72,6 +72,12 @@ table! {
    }
}

table! {
    invitations (email) {
        email -> Text,
    }
}

table! {
    organizations (uuid) {
        uuid -> Text,
@@ -110,12 +116,8 @@ table! {
        security_stamp -> Text,
        equivalent_domains -> Text,
        excluded_globals -> Text,
    }
}

table! {
    invitations (email) {
        email -> Text,
        client_kdf_type -> Integer,
        client_kdf_iter -> Integer,
    }
}
@@ -164,6 +166,7 @@ allow_tables_to_appear_in_same_query!(
    devices,
    folders,
    folders_ciphers,
    invitations,
    organizations,
    twofactor,
    users,
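A hedged aside: the new invitations table is keyed by email only. The sketch below shows how it might be used with diesel 1.3; the Invitation struct and the two helpers are illustrative assumptions, not the project's actual model code.

// Sketch: a minimal diesel model over an invitations table like the one above.
#[macro_use]
extern crate diesel;

use diesel::prelude::*;
use diesel::sqlite::SqliteConnection;

table! {
    invitations (email) {
        email -> Text,
    }
}

#[derive(Queryable, Insertable)]
#[table_name = "invitations"]
struct Invitation {
    email: String,
}

// Insert (or re-insert) an invitation for the given address.
fn invite(conn: &SqliteConnection, email: &str) -> QueryResult<usize> {
    diesel::replace_into(invitations::table)
        .values(&Invitation { email: email.to_lowercase() })
        .execute(conn)
}

// Look up a pending invitation, if any.
fn find_invitation(conn: &SqliteConnection, email: &str) -> Option<Invitation> {
    invitations::table
        .filter(invitations::email.eq(email.to_lowercase()))
        .first::<Invitation>(conn)
        .ok()
}

fn main() {
    // Exercising these helpers requires a real SQLite database with the
    // invitations table applied, so main is a no-op in this sketch.
}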
|
22
src/mail.rs
@@ -1,7 +1,7 @@
use std::error::Error;
use native_tls::{Protocol, TlsConnector};
use lettre::{EmailTransport, SmtpTransport, ClientTlsParameters, ClientSecurity};
use lettre::smtp::{ConnectionReuseParameters, SmtpTransportBuilder};
use lettre::{Transport, SmtpTransport, SmtpClient, ClientTlsParameters, ClientSecurity};
use lettre::smtp::ConnectionReuseParameters;
use lettre::smtp::authentication::Credentials;
use lettre_email::EmailBuilder;

@@ -9,8 +9,8 @@ use MailConfig;

fn mailer(config: &MailConfig) -> SmtpTransport {
    let client_security = if config.smtp_ssl {
        let mut tls_builder = TlsConnector::builder().unwrap();
        tls_builder.supported_protocols(&[Protocol::Tlsv11, Protocol::Tlsv12]).unwrap();
        let mut tls_builder = TlsConnector::builder();
        tls_builder.min_protocol_version(Some(Protocol::Tlsv11));
        ClientSecurity::Required(
            ClientTlsParameters::new(config.smtp_host.to_owned(), tls_builder.build().unwrap())
        )
@@ -18,22 +18,22 @@ fn mailer(config: &MailConfig) -> SmtpTransport {
        ClientSecurity::None
    };

    let smtp_transport = SmtpTransportBuilder::new(
    let smtp_client = SmtpClient::new(
        (config.smtp_host.to_owned().as_str(), config.smtp_port),
        client_security
    ).unwrap();

    let smtp_transport = match (&config.smtp_username, &config.smtp_password) {
    let smtp_client = match (&config.smtp_username, &config.smtp_password) {
        (Some(username), Some(password)) => {
            smtp_transport.credentials(Credentials::new(username.to_owned(), password.to_owned()))
            smtp_client.credentials(Credentials::new(username.to_owned(), password.to_owned()))
        },
        (_, _) => smtp_transport,
        (_, _) => smtp_client,
    };

    smtp_transport
    smtp_client
        .smtp_utf8(true)
        .connection_reuse(ConnectionReuseParameters::NoReuse)
        .build()
        .transport()
}

pub fn send_password_hint(address: &str, hint: Option<String>, config: &MailConfig) -> Result<(), String> {
@@ -56,7 +56,7 @@ pub fn send_password_hint(address: &str, hint: Option<String>, config: &MailConf
        .body(body)
        .build().unwrap();

    match mailer(config).send(&email) {
    match mailer(config).send(email.into()) {
        Ok(_) => Ok(()),
        Err(e) => Err(e.description().to_string()),
    }
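A hedged aside: the lettre 0.9 flow above boils down to "build an Email, convert it into a SendableEmail, hand it to a Transport". The standalone sketch below shows that call shape with placeholder host and addresses; new_simple() is used here only for brevity, whereas the real mailer() configures ClientSecurity and credentials explicitly.

// Sketch: send a message through lettre 0.9's SmtpClient / Transport API.
extern crate lettre;
extern crate lettre_email;

use lettre::{SmtpClient, Transport};
use lettre_email::EmailBuilder;

fn main() {
    let email = EmailBuilder::new()
        .to("user@example.com")
        .from("bitwarden-rs@example.com")
        .subject("Test")
        .body("Hello from lettre 0.9")
        .build()
        .unwrap();

    // new_simple() resolves the host and enables TLS with default settings.
    let mut mailer = SmtpClient::new_simple("smtp.example.com")
        .unwrap()
        .transport();

    match mailer.send(email.into()) {
        Ok(_) => println!("mail sent"),
        Err(e) => println!("send failed: {}", e),
    }
}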
|
99
src/main.rs
@@ -1,10 +1,13 @@
#![feature(plugin, custom_derive)]
#![feature(plugin, custom_derive, vec_remove_item, try_trait)]
#![plugin(rocket_codegen)]
#![allow(proc_macro_derive_resolution_fallback)] // TODO: Remove this when diesel update fixes warnings

extern crate rocket;
extern crate rocket_contrib;
extern crate reqwest;
extern crate multipart;
extern crate ws;
extern crate rmpv;
extern crate chashmap;
extern crate serde;
#[macro_use]
extern crate serde_derive;
@@ -31,8 +34,9 @@ extern crate lettre;
extern crate lettre_email;
extern crate native_tls;
extern crate fast_chemail;
extern crate byteorder;

use std::{env, path::Path, process::{exit, Command}};
use std::{path::Path, process::{exit, Command}};
use rocket::Rocket;

#[macro_use]
@@ -52,6 +56,7 @@ fn init_rocket() -> Rocket {
        .mount("/icons", api::icons_routes())
        .mount("/notifications", api::notifications_routes())
        .manage(db::init_pool())
        .manage(api::start_notification_server())
}

// Embed the migrations from the migrations folder into the application
@@ -73,9 +78,8 @@ mod migrations {
fn main() {
    check_db();
    check_rsa_keys();
    check_web_vault();
    migrations::run_migrations();

    check_web_vault();
    migrations::run_migrations();

    init_rocket().launch();
}
@@ -172,27 +176,32 @@ pub struct MailConfig {

impl MailConfig {
    fn load() -> Option<Self> {
        let smtp_host = env::var("SMTP_HOST").ok();

        use util::{get_env, get_env_or};

        // When SMTP_HOST is absent, we assume the user does not want to enable it.
        if smtp_host.is_none() {
            return None
        }
        let smtp_host = match get_env("SMTP_HOST") {
            Some(host) => host,
            None => return None,
        };

        let smtp_ssl = util::parse_option_string(env::var("SMTP_SSL").ok()).unwrap_or(true);
        let smtp_port = util::parse_option_string(env::var("SMTP_PORT").ok())
            .unwrap_or_else(|| {
                if smtp_ssl {
                    587u16
                } else {
                    25u16
                }
            });
        let smtp_from = get_env("SMTP_FROM").unwrap_or_else(|| {
            println!("Please specify SMTP_FROM to enable SMTP support.");
            exit(1);
        });

        let smtp_username = env::var("SMTP_USERNAME").ok();
        let smtp_password = env::var("SMTP_PASSWORD").ok().or_else(|| {
        let smtp_ssl = get_env_or("SMTP_SSL", true);
        let smtp_port = get_env("SMTP_PORT").unwrap_or_else(||
            if smtp_ssl {
                587u16
            } else {
                25u16
            }
        );

        let smtp_username = get_env("SMTP_USERNAME");
        let smtp_password = get_env("SMTP_PASSWORD").or_else(|| {
            if smtp_username.as_ref().is_some() {
                println!("Please specify SMTP_PASSWORD to enable SMTP support.");
                println!("SMTP_PASSWORD is mandatory when specifying SMTP_USERNAME.");
                exit(1);
            } else {
                None
@@ -200,13 +209,12 @@ impl MailConfig {
        });

        Some(MailConfig {
            smtp_host: smtp_host.unwrap(),
            smtp_port: smtp_port,
            smtp_ssl: smtp_ssl,
            smtp_from: util::parse_option_string(env::var("SMTP_FROM").ok())
                .unwrap_or("bitwarden-rs@localhost".to_string()),
            smtp_username: smtp_username,
            smtp_password: smtp_password,
            smtp_host,
            smtp_port,
            smtp_ssl,
            smtp_from,
            smtp_username,
            smtp_password,
        })
    }
}
@@ -224,6 +232,8 @@ pub struct Config {
    web_vault_folder: String,
    web_vault_enabled: bool,

    websocket_port: i32,

    local_icon_extractor: bool,
    signups_allowed: bool,
    invitations_allowed: bool,
@@ -238,32 +248,35 @@ pub struct Config {

impl Config {
    fn load() -> Self {
        use util::{get_env, get_env_or};
        dotenv::dotenv().ok();

        let df = env::var("DATA_FOLDER").unwrap_or("data".into());
        let key = env::var("RSA_KEY_FILENAME").unwrap_or(format!("{}/{}", &df, "rsa_key"));
        let df = get_env_or("DATA_FOLDER", "data".to_string());
        let key = get_env_or("RSA_KEY_FILENAME", format!("{}/{}", &df, "rsa_key"));

        let domain = env::var("DOMAIN");
        let domain = get_env("DOMAIN");

        Config {
            database_url: env::var("DATABASE_URL").unwrap_or(format!("{}/{}", &df, "db.sqlite3")),
            icon_cache_folder: env::var("ICON_CACHE_FOLDER").unwrap_or(format!("{}/{}", &df, "icon_cache")),
            attachments_folder: env::var("ATTACHMENTS_FOLDER").unwrap_or(format!("{}/{}", &df, "attachments")),
            database_url: get_env_or("DATABASE_URL", format!("{}/{}", &df, "db.sqlite3")),
            icon_cache_folder: get_env_or("ICON_CACHE_FOLDER", format!("{}/{}", &df, "icon_cache")),
            attachments_folder: get_env_or("ATTACHMENTS_FOLDER", format!("{}/{}", &df, "attachments")),

            private_rsa_key: format!("{}.der", &key),
            private_rsa_key_pem: format!("{}.pem", &key),
            public_rsa_key: format!("{}.pub.der", &key),

            web_vault_folder: env::var("WEB_VAULT_FOLDER").unwrap_or("web-vault/".into()),
            web_vault_enabled: util::parse_option_string(env::var("WEB_VAULT_ENABLED").ok()).unwrap_or(true),
            web_vault_folder: get_env_or("WEB_VAULT_FOLDER", "web-vault/".into()),
            web_vault_enabled: get_env_or("WEB_VAULT_ENABLED", true),

            local_icon_extractor: util::parse_option_string(env::var("LOCAL_ICON_EXTRACTOR").ok()).unwrap_or(false),
            signups_allowed: util::parse_option_string(env::var("SIGNUPS_ALLOWED").ok()).unwrap_or(true),
            invitations_allowed: util::parse_option_string(env::var("INVITATIONS_ALLOWED").ok()).unwrap_or(true),
            password_iterations: util::parse_option_string(env::var("PASSWORD_ITERATIONS").ok()).unwrap_or(100_000),
            show_password_hint: util::parse_option_string(env::var("SHOW_PASSWORD_HINT").ok()).unwrap_or(true),
            websocket_port: get_env_or("WEBSOCKET_PORT", 3012),

            domain_set: domain.is_ok(),
            local_icon_extractor: get_env_or("LOCAL_ICON_EXTRACTOR", false),
            signups_allowed: get_env_or("SIGNUPS_ALLOWED", true),
            invitations_allowed: get_env_or("INVITATIONS_ALLOWED", true),
            password_iterations: get_env_or("PASSWORD_ITERATIONS", 100_000),
            show_password_hint: get_env_or("SHOW_PASSWORD_HINT", true),

            domain_set: domain.is_some(),
            domain: domain.unwrap_or("http://localhost".into()),

            mail: MailConfig::load(),
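A hedged aside on the port defaulting kept through this refactor: when SMTP_PORT is not set, the port follows the SMTP_SSL flag. A trivial standalone check of that rule:

// Sketch: the implicit SMTP port default (587 with SSL/STARTTLS, 25 without).
fn default_smtp_port(smtp_ssl: bool) -> u16 {
    if smtp_ssl { 587 } else { 25 }
}

fn main() {
    assert_eq!(default_smtp_port(true), 587);
    assert_eq!(default_smtp_port(false), 25);
    println!("SMTP port defaults check out");
}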
|
28
src/util.rs
@@ -97,6 +97,7 @@ pub fn get_display_size(size: i32) -> String {
///

use std::str::FromStr;
use std::ops::Try;

pub fn upcase_first(s: &str) -> String {
    let mut c = s.chars();
@@ -106,14 +107,37 @@ pub fn upcase_first(s: &str) -> String {
    }
}

pub fn parse_option_string<S, T>(string: Option<S>) -> Option<T> where S: AsRef<str>, T: FromStr {
    if let Some(Ok(value)) = string.map(|s| s.as_ref().parse::<T>()) {
pub fn try_parse_string<S, T, U>(string: impl Try<Ok = S, Error=U>) -> Option<T> where S: AsRef<str>, T: FromStr {
    if let Ok(Ok(value)) = string.into_result().map(|s| s.as_ref().parse::<T>()) {
        Some(value)
    } else {
        None
    }
}

pub fn try_parse_string_or<S, T, U>(string: impl Try<Ok = S, Error=U>, default: T) -> T where S: AsRef<str>, T: FromStr {
    if let Ok(Ok(value)) = string.into_result().map(|s| s.as_ref().parse::<T>()) {
        value
    } else {
        default
    }
}


///
/// Env methods
///

use std::env;

pub fn get_env<V>(key: &str) -> Option<V> where V: FromStr {
    try_parse_string(env::var(key))
}

pub fn get_env_or<V>(key: &str, default: V) -> V where V: FromStr {
    try_parse_string_or(env::var(key), default)
}

///
/// Date util methods
///
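A hedged aside on the env helpers above: they lean on the nightly std::ops::Try trait (hence the try_trait feature gate added in main.rs). A stable-Rust equivalent, written here only as a sketch, can take the Result from env::var directly:

// Sketch: get_env / get_env_or without std::ops::Try, on stable Rust.
use std::env;
use std::str::FromStr;

pub fn get_env<V: FromStr>(key: &str) -> Option<V> {
    env::var(key).ok().and_then(|s| s.parse::<V>().ok())
}

pub fn get_env_or<V: FromStr>(key: &str, default: V) -> V {
    get_env(key).unwrap_or(default)
}

fn main() {
    // With WEBSOCKET_PORT and DOMAIN unset, both helpers fall back as expected.
    let port: i32 = get_env_or("WEBSOCKET_PORT", 3012);
    let domain: Option<String> = get_env("DOMAIN");
    println!("port={} domain_set={}", port, domain.is_some());
}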