Mirror of https://github.com/dani-garcia/vaultwarden.git (synced 2025-09-10 10:45:57 +03:00)
Compare commits
36 Commits
- dd00591082
- 1e9dd2fd4e
- 62bc58e145
- 760e0ab805
- 8f5bfe7938
- b359df7045
- f1b1000600
- c0e248c457
- f510a1b060
- fafc3883c6
- 1bdb98d139
- 2f5ca88fb1
- e7a24159c5
- e056cc8178
- 8ce4c79612
- 77d9641323
- 31e4237247
- c32c65d367
- 0a4dbaf307
- daa66b08dc
- d613fa1e68
- 55fbd8d468
- adf40291e8
- acfc900997
- 0a08b1afc8
- eb48a3fac2
- 2e7fa6440b
- 9ecc98c3cc
- 02fd68d63b
- 235bce1ecb
- e985221b50
- 77cf63c06d
- faec050a6d
- 22304f4925
- 58a78ffa54
- 64f6c60bfd
```diff
@@ -4,7 +4,7 @@
 ####################### VAULT BUILD IMAGE #######################
 FROM node:8-alpine as vault
 
-ENV VAULT_VERSION "v2.3.0"
+ENV VAULT_VERSION "v2.4.0"
 
 ENV URL "https://github.com/bitwarden/web.git"
 
```

```diff
@@ -4,7 +4,7 @@
 ####################### VAULT BUILD IMAGE #######################
 FROM node:8-alpine as vault
 
-ENV VAULT_VERSION "v2.3.0"
+ENV VAULT_VERSION "v2.4.0"
 
 ENV URL "https://github.com/bitwarden/web.git"
 
@@ -80,7 +80,7 @@ RUN cargo build --release --target=aarch64-unknown-linux-gnu -v
 ######################## RUNTIME IMAGE ########################
 # Create a new stage with a minimal image
 # because we already have a binary built
-FROM resin/aarch64-debian:stretch
+FROM balenalib/aarch64-debian:stretch
 
 ENV ROCKET_ENV "staging"
 ENV ROCKET_WORKERS=10
```

```diff
@@ -4,7 +4,7 @@
 ####################### VAULT BUILD IMAGE #######################
 FROM node:8-alpine as vault
 
-ENV VAULT_VERSION "v2.3.0"
+ENV VAULT_VERSION "v2.4.0"
 
 ENV URL "https://github.com/bitwarden/web.git"
 
```

```diff
@@ -4,7 +4,7 @@
 ####################### VAULT BUILD IMAGE #######################
 FROM node:8-alpine as vault
 
-ENV VAULT_VERSION "v2.3.0"
+ENV VAULT_VERSION "v2.4.0"
 
 ENV URL "https://github.com/bitwarden/web.git"
 
@@ -80,7 +80,7 @@ RUN cargo build --release --target=armv7-unknown-linux-gnueabihf -v
 ######################## RUNTIME IMAGE ########################
 # Create a new stage with a minimal image
 # because we already have a binary built
-FROM resin/armv7hf-debian:stretch
+FROM balenalib/armv7hf-debian:stretch
 
 ENV ROCKET_ENV "staging"
 ENV ROCKET_WORKERS=10
```
PROXY.md (19 changed lines)

````diff
@@ -31,26 +31,25 @@ localhost:443 {
 ## Nginx (by shauder)
 ```nginx
 server {
-  include conf.d/ssl/ssl.conf;
 
   listen 443 ssl http2;
   server_name vault.*;
 
-  location /notifications/hub/negotiate {
-    include conf.d/proxy-confs/proxy.conf;
-    proxy_pass http://<SERVER>:80;
-  }
+  # Specify SSL config if using a shared one.
+  #include conf.d/ssl/ssl.conf;
 
   location / {
-    include conf.d/proxy-confs/proxy.conf;
     proxy_pass http://<SERVER>:80;
   }
 
   location /notifications/hub {
     proxy_pass http://<SERVER>:3012;
     proxy_set_header Upgrade $http_upgrade;
     proxy_set_header Connection "upgrade";
   }
+
+  location /notifications/hub/negotiate {
+    proxy_pass http://<SERVER>:80;
+  }
 }
 ```
````
README.md (43 changed lines)

````diff
@@ -43,6 +43,7 @@ _*Note, that this project is not associated with the [Bitwarden](https://bitward
 - [Building binary](#building-binary)
 - [Available packages](#available-packages)
   - [Arch Linux](#arch-linux)
+- [Kubernetes deployment](#kubernetes-deployment)
 - [Backing up your vault](#backing-up-your-vault)
   - [1. the sqlite3 database](#1-the-sqlite3-database)
   - [2. the attachments folder](#2-the-attachments-folder)
@@ -141,7 +142,7 @@ docker run -d --name bitwarden \
   -p 80:80 \
   mprasil/bitwarden:latest
 ```
-Note: While users can't register on their own, they can still be invited by already registered users. Read bellow if you also want to disable that.
+Note: While users can't register on their own, they can still be invited by already registered users. Read below if you also want to disable that.
 
 ### Disable invitations
 
@@ -157,6 +158,8 @@ docker run -d --name bitwarden \
 ```
 ### Configure server administrator
 
+**Warning:** *Never* use your regular account for the admin functionality. This is a bit of a hack using the Vault interface for something it's not intended to do and it breaks any other functionality for the account. Please set up and use separate account just for this functionality.
+
 You can configure one email account to be server administrator via the `SERVER_ADMIN_EMAIL` environment variable:
 
 ```sh
@@ -169,7 +172,7 @@ docker run -d --name bitwarden \
 
 This will give the user extra functionality and privileges to manage users on the server. In the Vault, the user will see a special (virtual) organization called `bitwarden_rs`. This organization doesn't actually exist and can't be used for most things. (can't have collections or ciphers) Instead it just contains all the users registered on the server. Deleting users from this organization will actually completely delete the user from the server. Inviting users into this organization will just invite the user so they are able to register, but will not grant any organization membership. (unlike inviting user to regular organization)
 
-You can think of the `bitwarden_rs` organization as sort of Admin interface to manage users on the server. Due to the virtual nature of this organization, it is missing some internal data structures and most of the functionality. It is thus strongly recommended to use dedicated account for `SERVER_ADMIN_EMAIL` and this account shouldn't be used for actually storing passwords. Also keep in mind that deleting user this way removes the user permanently without any way to restore the deleted data just as if user deleted their own account.
+You can think of the `bitwarden_rs` organization as sort of Admin interface to manage users on the server. Keep in mind that deleting user this way removes the user permanently without any way to restore the deleted data just as if user deleted their own account.
 
 ### Enabling HTTPS
 To enable HTTPS, you need to configure the `ROCKET_TLS`.
@@ -192,17 +195,19 @@ docker run -d --name bitwarden \
 ```
 Note that you need to mount ssl files and you need to forward appropriate port.
 
+Due to what is likely a certificate validation bug in Android, you need to make sure that your certificate includes the full chain of trust. In the case of certbot, this means using `fullchain.pem` instead of `cert.pem`.
+
 Softwares used for getting certs are often using symlinks. If that is the case, both locations need to be accessible to the docker container.
 
-Example: [certbot](https://certbot.eff.org/) will create a folder that contains the needed `cert.pem` and `privacy.pem` files in `/etc/letsencrypt/live/mydomain/`
+Example: [certbot](https://certbot.eff.org/) will create a folder that contains the needed `fullchain.pem` and `privkey.pem` files in `/etc/letsencrypt/live/mydomain/`
 
-These files are symlinked to `../../archive/mydomain/mykey.pem`
+These files are symlinked to `../../archive/mydomain/privkey.pem`
 
 So to use from bitwarden container:
 
 ```sh
 docker run -d --name bitwarden \
-  -e ROCKET_TLS='{certs="/ssl/live/mydomain/cert.pem",key="/ssl/live/mydomain/privkey.pem"}' \
+  -e ROCKET_TLS='{certs="/ssl/live/mydomain/fullchain.pem",key="/ssl/live/mydomain/privkey.pem"}' \
   -v /etc/letsencrypt/:/ssl/ \
   -v /bw-data/:/data/ \
   -p 443:80 \
@@ -212,12 +217,23 @@ docker run -d --name bitwarden \
 *Important: This does not apply to the mobile clients, which use push notifications.*
 
 To enable WebSockets notifications, an external reverse proxy is necessary, and it must be configured to do the following:
-- Route the `/notifications/hub` endpoint to the WebSocket server, by default at port `3012`, making sure to pass the `Connection` and `Upgrade` headers.
+- Route the `/notifications/hub` endpoint to the WebSocket server, by default at port `3012`, making sure to pass the `Connection` and `Upgrade` headers. (Note the port can be changed with `WEBSOCKET_PORT` variable)
 - Route everything else, including `/notifications/hub/negotiate`, to the standard Rocket server, by default at port `80`.
 - If using Docker, you may need to map both ports with the `-p` flag
 
 Example configurations are included in the [PROXY.md](https://github.com/dani-garcia/bitwarden_rs/blob/master/PROXY.md) file.
 
+Then you need to enable WebSockets negotiation on the bitwarden_rs side by setting the `WEBSOCKET_ENABLED` variable to `true`:
+
+```sh
+docker run -d --name bitwarden \
+  -e WEBSOCKET_ENABLED=true \
+  -v /bw-data/:/data/ \
+  -p 80:80 \
+  -p 3012:3012 \
+  mprasil/bitwarden:latest
+```
+
 Note: The reason for this workaround is the lack of support for WebSockets from Rocket (though [it's a planned feature](https://github.com/SergioBenitez/Rocket/issues/90)), which forces us to launch a secondary server on a separate port.
 
 ### Enabling U2F authentication
@@ -315,7 +331,7 @@ docker run -d --name bitwarden \
 
 When you run bitwarden_rs, it spawns `2 * <number of cpu cores>` workers to handle requests. On some systems this might lead to low number of workers and hence slow performance, so the default in the docker image is changed to spawn 10 threads. You can override this setting to increase or decrease the number of workers by setting the `ROCKET_WORKERS` variable.
 
-In the example bellow, we're starting with 20 workers:
+In the example below, we're starting with 20 workers:
 
 ```sh
 docker run -d --name bitwarden \
@@ -402,6 +418,11 @@ For building binary outside the Docker environment and running it locally withou
 
 Bitwarden_rs is already packaged for Archlinux thanks to @mqus. There is an [AUR package](https://aur.archlinux.org/packages/bitwarden_rs) (optionally with the [vault web interface](https://aur.archlinux.org/packages/bitwarden_rs-vault/) ) available.
 
+## Kubernetes deployment
+
+Please check the [kubernetes-bitwarden_rs](https://github.com/icicimov/kubernetes-bitwarden_rs) repository for example deployment in Kubernetes.
+It will setup a fully functional and secure `bitwarden_rs` application in Kubernetes behind [nginx-ingress-controller](https://github.com/kubernetes/ingress-nginx) and AWS [ELBv1](https://aws.amazon.com/elasticloadbalancing/features/#Details_for_Elastic_Load_Balancing_Products). It provides little bit more than just simple deployment but you can use all or just part of the manifests depending on your needs and setup.
+
 ## Backing up your vault
 
 ### 1. the sqlite3 database
@@ -409,10 +430,10 @@ Bitwarden_rs is already packaged for Archlinux thanks to @mqus. There is an [AUR
 The sqlite3 database should be backed up using the proper sqlite3 backup command. This will ensure the database does not become corrupted if the backup happens during a database write.
 
 ```
-sqlite3 /$DATA_FOLDER/db.sqlite3 ".backup '/$DATA_FOLDER/db-backup/backup.sq3'"
+sqlite3 /$DATA_FOLDER/db.sqlite3 ".backup '/$DATA_FOLDER/db-backup/backup.sqlite3'"
 ```
 
-This command can be run via a CRON job everyday, however note that it will overwrite the same backup.sq3 file each time. This backup file should therefore be saved via incremental backup either using a CRON job command that appends a timestamp or from another backup app such as Duplicati.
+This command can be run via a CRON job everyday, however note that it will overwrite the same `backup.sqlite3` file each time. This backup file should therefore be saved via incremental backup either using a CRON job command that appends a timestamp or from another backup app such as Duplicati. To restore simply overwrite `db.sqlite3` with `backup.sqlite3` (while bitwarden_rs is stopped).
 
 ### 2. the attachments folder
 
@@ -431,8 +452,8 @@ This is optional, the icon cache can re-download itself however if you have a la
 The root user inside the container is already pretty limited in what it can do, so the default setup should be secure enough. However if you wish to go the extra mile to avoid using root even in container, here's how you can do that:
 
 1. Create a data folder that's owned by non-root user, so you can use that user to write persistent data. Get the user `id`. In linux you can run `stat <folder_name>` to get/verify the owner ID.
-2. When you run the container, you need to provide the user ID as one of the parameters. Note that this needs to be in the numeric form and not the user name, because docker would try to find such user defined inside the image, which would likely not be there or it would have different ID than your local user and hence wouldn't be able to write the persistent data. This can be done with the `--user` parameter.
-3. bitwarden_rs listens on port `80` inside the container by default, this [won't work with non-root user](https://www.w3.org/Daemon/User/Installation/PrivilegedPorts.html), because regular users aren't allowed to open port bellow `1024`. To overcome this, you need to configure server to listen on a different port, you can use `ROCKET_PORT` to do that.
+2. When you run the container, you need to provide the user ID as one of the parameters. Note that this needs to be in the numeric form and not the username, because docker would try to find such user-defined inside the image, which would likely not be there or it would have different ID than your local user and hence wouldn't be able to write the persistent data. This can be done with the `--user` parameter.
+3. bitwarden_rs listens on port `80` inside the container by default, this [won't work with non-root user](https://www.w3.org/Daemon/User/Installation/PrivilegedPorts.html), because regular users aren't allowed to open port below `1024`. To overcome this, you need to configure server to listen on a different port, you can use `ROCKET_PORT` to do that.
 
 Here's sample docker run, that uses user with id `1000` and with the port redirection configured, so that inside container the service is listening on port `8080` and docker translates that to external (host) port `80`:
 
````
```diff
@@ -1,6 +1,6 @@
 --- a/src/app/services/services.module.ts
 +++ b/src/app/services/services.module.ts
-@@ -120,20 +120,17 @@ const notificationsService = new NotificationsService(userService, syncService,
+@@ -120,20 +120,16 @@ const notificationsService = new NotificationsService(userService, syncService,
 const environmentService = new EnvironmentService(apiService, storageService, notificationsService);
 const auditService = new AuditService(cryptoFunctionService, apiService);
 
@@ -22,7 +22,6 @@
 + const isDev = false;
 + environmentService.baseUrl = window.location.origin;
 + environmentService.notificationsUrl = window.location.origin + '/notifications';
-+
-await apiService.setUrls({
+apiService.setUrls({
 base: isDev ? null : window.location.origin,
 api: isDev ? 'http://localhost:4000' : null,
```
```diff
@@ -39,7 +39,9 @@ fn register(data: JsonUpcase<RegisterData>, conn: DbConn) -> EmptyResult {
         if Invitation::take(&data.Email, &conn) {
             for mut user_org in UserOrganization::find_invited_by_user(&user.uuid, &conn).iter_mut() {
                 user_org.status = UserOrgStatus::Accepted as i32;
-                user_org.save(&conn);
+                if user_org.save(&conn).is_err() {
+                    err!("Failed to accept user to organization")
+                }
             };
             user
         } else if CONFIG.signups_allowed {
@@ -82,9 +84,10 @@ fn register(data: JsonUpcase<RegisterData>, conn: DbConn) -> EmptyResult {
         user.public_key = Some(keys.PublicKey);
     }
 
-    user.save(&conn);
-
-    Ok(())
+    match user.save(&conn) {
+        Ok(()) => Ok(()),
+        Err(_) => err!("Failed to save user")
+    }
 }
 
 #[get("/accounts/profile")]
@@ -117,9 +120,10 @@ fn post_profile(data: JsonUpcase<ProfileData>, headers: Headers, conn: DbConn) -
         Some(ref h) if h.is_empty() => None,
         _ => data.MasterPasswordHint,
     };
-    user.save(&conn);
-
-    Ok(Json(user.to_json(&conn)))
+    match user.save(&conn) {
+        Ok(()) => Ok(Json(user.to_json(&conn))),
+        Err(_) => err!("Failed to save user profile")
+    }
 }
 
 #[get("/users/<uuid>/public-key")]
@@ -145,11 +149,14 @@ fn post_keys(data: JsonUpcase<KeysData>, headers: Headers, conn: DbConn) -> Json
     user.private_key = Some(data.EncryptedPrivateKey);
     user.public_key = Some(data.PublicKey);
 
-    user.save(&conn);
-
-    Ok(Json(user.to_json(&conn)))
+    match user.save(&conn) {
+        Ok(()) => Ok(Json(user.to_json(&conn))),
+        Err(_) => err!("Failed to save the user's keys")
+    }
 }
 
 #[derive(Deserialize)]
 #[allow(non_snake_case)]
 struct ChangePassData {
@@ -169,9 +176,10 @@ fn post_password(data: JsonUpcase<ChangePassData>, headers: Headers, conn: DbCon
 
     user.set_password(&data.NewMasterPasswordHash);
     user.key = data.Key;
-    user.save(&conn);
-
-    Ok(())
+    match user.save(&conn) {
+        Ok(()) => Ok(()),
+        Err(_) => err!("Failed to save password")
+    }
 }
 
 #[derive(Deserialize)]
@@ -198,9 +206,10 @@ fn post_kdf(data: JsonUpcase<ChangeKdfData>, headers: Headers, conn: DbConn) ->
     user.client_kdf_type = data.Kdf;
     user.set_password(&data.NewMasterPasswordHash);
     user.key = data.Key;
-    user.save(&conn);
-
-    Ok(())
+    match user.save(&conn) {
+        Ok(()) => Ok(()),
+        Err(_) => err!("Failed to save password settings")
+    }
 }
 
 #[post("/accounts/security-stamp", data = "<data>")]
@@ -213,9 +222,10 @@ fn post_sstamp(data: JsonUpcase<PasswordData>, headers: Headers, conn: DbConn) -
     }
 
     user.reset_security_stamp();
-    user.save(&conn);
-
-    Ok(())
+    match user.save(&conn) {
+        Ok(()) => Ok(()),
+        Err(_) => err!("Failed to reset security stamp")
+    }
 }
 
 #[derive(Deserialize)]
@@ -270,9 +280,10 @@ fn post_email(data: JsonUpcase<ChangeEmailData>, headers: Headers, conn: DbConn)
     user.set_password(&data.NewMasterPasswordHash);
     user.key = data.Key;
 
-    user.save(&conn);
-
-    Ok(())
+    match user.save(&conn) {
+        Ok(()) => Ok(()),
+        Err(_) => err!("Failed to save email address")
+    }
 }
 
 #[post("/accounts/delete", data = "<data>")]
```
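The hunks above all apply the same fix: the model `save` methods now return a `QueryResult<()>` instead of a `bool`, and every route matches on that result and turns a failure into an API error via the `err!` macro rather than ignoring it. Below is a minimal, self-contained sketch of that call-site pattern; `ApiResult`, `api_err!` and the toy `User` type are illustrative stand-ins, not the project's actual types.

```rust
// Minimal, self-contained sketch of the call-site pattern used in the hunks
// above. `ApiResult`, `api_err!` and this toy `User` are illustrative
// stand-ins for the project's EmptyResult, err! macro and Diesel-backed model.

type ApiResult = Result<(), String>;

macro_rules! api_err {
    ($msg:expr) => {
        return Err(String::from($msg))
    };
}

struct User {
    email: String,
}

impl User {
    // Stand-in for the Diesel-backed save; the real method returns QueryResult<()>.
    fn save(&mut self) -> Result<(), &'static str> {
        if self.email.is_empty() {
            Err("refusing to store a user without an email")
        } else {
            Ok(())
        }
    }
}

fn register(email: &str) -> ApiResult {
    let mut user = User { email: email.into() };

    // Same shape as the patched routes: a failed save becomes an explicit
    // API error instead of being silently discarded.
    match user.save() {
        Ok(()) => Ok(()),
        Err(_) => api_err!("Failed to save user"),
    }
}

fn main() {
    assert!(register("me@example.com").is_ok());
    assert!(register("").is_err());
    println!("both cases handled");
}
```

The practical effect is that a failed database write now short-circuits the handler instead of letting it return a success response built from unsaved data.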
```diff
@@ -22,8 +22,14 @@ use auth::Headers;
 
 use CONFIG;
 
-#[get("/sync")]
-fn sync(headers: Headers, conn: DbConn) -> JsonResult {
+#[derive(FromForm)]
+#[allow(non_snake_case)]
+struct SyncData {
+    excludeDomains: bool,
+}
+
+#[get("/sync?<data>")]
+fn sync(data: SyncData, headers: Headers, conn: DbConn) -> JsonResult {
     let user_json = headers.user.to_json(&conn);
 
     let folders = Folder::find_by_user(&headers.user.uuid, &conn);
@@ -35,7 +41,7 @@ fn sync(headers: Headers, conn: DbConn) -> JsonResult {
     let ciphers = Cipher::find_by_user(&headers.user.uuid, &conn);
     let ciphers_json: Vec<Value> = ciphers.iter().map(|c| c.to_json(&headers.host, &headers.user.uuid, &conn)).collect();
 
-    let domains_json = api::core::get_eq_domains(headers).unwrap().into_inner();
+    let domains_json = if data.excludeDomains { Value::Null } else { api::core::get_eq_domains(headers).unwrap().into_inner() };
 
     Ok(Json(json!({
         "Profile": user_json,
@@ -47,6 +53,13 @@ fn sync(headers: Headers, conn: DbConn) -> JsonResult {
     })))
 }
 
+#[get("/sync")]
+fn sync_no_query(headers: Headers, conn: DbConn) -> JsonResult {
+    let sync_data = SyncData {
+        excludeDomains: false,
+    };
+    sync(sync_data, headers, conn)
+}
 
 #[get("/ciphers")]
 fn get_ciphers(headers: Headers, conn: DbConn) -> JsonResult {
```
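The `sync` change above splits one endpoint into a query-aware handler plus a `sync_no_query` fallback registered for the bare `/sync` path, so clients that send no query string keep working. A standalone sketch of that delegation pattern, with no Rocket dependency and illustrative names only:

```rust
// Standalone sketch of the sync / sync_no_query split: one handler takes
// explicit options parsed from the query string, and a parameter-less
// fallback supplies defaults and delegates to it.

struct SyncData {
    exclude_domains: bool,
}

fn sync(data: SyncData) -> String {
    // The real handler skips building the equivalent-domains JSON when
    // excludeDomains is set; here we only report the decision.
    if data.exclude_domains {
        "sync without equivalent domains".to_string()
    } else {
        "sync with equivalent domains".to_string()
    }
}

// Fallback used when the client sends no query string at all,
// mirroring sync_no_query in the diff.
fn sync_no_query() -> String {
    sync(SyncData { exclude_domains: false })
}

fn main() {
    println!("{}", sync(SyncData { exclude_domains: true }));
    println!("{}", sync_no_query());
}
```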
```diff
@@ -229,11 +242,15 @@ fn post_ciphers_import(data: JsonUpcase<ImportData>, headers: Headers, conn: DbC
     let data: ImportData = data.into_inner().data;
 
     // Read and create the folders
-    let folders: Vec<_> = data.Folders.into_iter().map(|folder| {
-        let mut folder = Folder::new(headers.user.uuid.clone(), folder.Name);
-        folder.save(&conn);
-        folder
-    }).collect();
+    let mut folders: Vec<_> = Vec::new();
+    for folder in data.Folders.into_iter() {
+        let mut new_folder = Folder::new(headers.user.uuid.clone(), folder.Name);
+        if new_folder.save(&conn).is_err() {
+            err!("Failed importing folders")
+        } else {
+            folders.push(new_folder);
+        }
+    }
 
     // Read the relations between folders and ciphers
     use std::collections::HashMap;
@@ -47,7 +47,9 @@ fn post_folders(data: JsonUpcase<FolderData>, headers: Headers, conn: DbConn, ws
 
     let mut folder = Folder::new(headers.user.uuid.clone(), data.Name);
 
-    folder.save(&conn);
+    if folder.save(&conn).is_err() {
+        err!("Failed to save folder")
+    }
     ws.send_folder_update(UpdateType::SyncFolderCreate, &folder);
 
     Ok(Json(folder.to_json()))
@@ -73,7 +75,9 @@ fn put_folder(uuid: String, data: JsonUpcase<FolderData>, headers: Headers, conn
 
     folder.name = data.Name;
 
-    folder.save(&conn);
+    if folder.save(&conn).is_err() {
+        err!("Failed to save folder")
+    }
     ws.send_folder_update(UpdateType::SyncFolderUpdate, &folder);
 
     Ok(Json(folder.to_json()))
@@ -30,6 +30,7 @@ pub fn routes() -> Vec<Route> {
         prelogin,
 
         sync,
+        sync_no_query,
 
         get_ciphers,
         get_cipher,
@@ -79,6 +80,7 @@ pub fn routes() -> Vec<Route> {
         activate_authenticator,
         activate_authenticator_put,
         generate_u2f,
+        generate_u2f_challenge,
         activate_u2f,
         activate_u2f_put,
 
@@ -120,6 +122,7 @@ pub fn routes() -> Vec<Route> {
 
         get_eq_domains,
         post_eq_domains,
+        put_eq_domains,
 
     ]
 }
@@ -215,7 +218,7 @@ struct EquivDomainData {
 }
 
 #[post("/settings/domains", data = "<data>")]
-fn post_eq_domains(data: JsonUpcase<EquivDomainData>, headers: Headers, conn: DbConn) -> EmptyResult {
+fn post_eq_domains(data: JsonUpcase<EquivDomainData>, headers: Headers, conn: DbConn) -> JsonResult {
     let data: EquivDomainData = data.into_inner().data;
 
     let excluded_globals = data.ExcludedGlobalEquivalentDomains.unwrap_or_default();
@@ -227,7 +230,14 @@ fn post_eq_domains(data: JsonUpcase<EquivDomainData>, headers: Headers, conn: Db
     user.excluded_globals = to_string(&excluded_globals).unwrap_or("[]".to_string());
     user.equivalent_domains = to_string(&equivalent_domains).unwrap_or("[]".to_string());
 
-    user.save(&conn);
+    match user.save(&conn) {
+        Ok(()) => Ok(Json(json!({}))),
+        Err(_) => err!("Failed to save user")
+    }
 
-    Ok(())
+}
+
+#[put("/settings/domains", data = "<data>")]
+fn put_eq_domains(data: JsonUpcase<EquivDomainData>, headers: Headers, conn: DbConn) -> JsonResult {
+    post_eq_domains(data, headers, conn)
 }
```
```diff
@@ -49,8 +49,13 @@ fn create_organization(headers: Headers, data: JsonUpcase<OrgData>, conn: DbConn
     user_org.type_ = UserOrgType::Owner as i32;
     user_org.status = UserOrgStatus::Confirmed as i32;
 
-    org.save(&conn);
-    user_org.save(&conn);
+    if org.save(&conn).is_err() {
+        err!("Failed creating organization")
+    }
+    if user_org.save(&conn).is_err() {
+        err!("Failed to add user to organization")
+    }
 
     if collection.save(&conn).is_err() {
         err!("Failed creating Collection");
     }
@@ -128,9 +133,11 @@ fn post_organization(org_id: String, _headers: OwnerHeaders, data: JsonUpcase<Or
 
     org.name = data.Name;
     org.billing_email = data.BillingEmail;
-    org.save(&conn);
 
-    Ok(Json(org.to_json()))
+    match org.save(&conn) {
+        Ok(()) => Ok(Json(org.to_json())),
+        Err(_) => err!("Failed to modify organization")
+    }
 }
 
 // GET /api/collections?writeOnly=false
@@ -384,11 +391,11 @@ fn send_invite(org_id: String, data: JsonUpcase<InviteData>, headers: AdminHeade
     match invitation.save(&conn) {
         Ok(()) => {
             let mut user = User::new(email.clone());
-            if user.save(&conn) {
+            if user.save(&conn).is_err() {
+                err!("Failed to create placeholder for invited user")
+            } else {
                 user_org_status = UserOrgStatus::Invited as i32;
                 user
-            } else {
-                err!("Failed to create placeholder for invited user")
             }
         }
         Err(_) => err!(format!("Failed to invite: {}", email))
@@ -427,7 +434,9 @@ fn send_invite(org_id: String, data: JsonUpcase<InviteData>, headers: AdminHeade
             }
         }
 
-        new_user.save(&conn);
+        if new_user.save(&conn).is_err() {
+            err!("Failed to add user to organization")
+        }
     }
 }
 
@@ -458,9 +467,10 @@ fn confirm_invite(org_id: String, org_user_id: String, data: JsonUpcase<Value>,
         None => err!("Invalid key provided")
     };
 
-    user_to_confirm.save(&conn);
-
-    Ok(())
+    match user_to_confirm.save(&conn) {
+        Ok(()) => Ok(()),
+        Err(_) => err!("Failed to add user to organization")
+    }
 }
 
 #[get("/organizations/<org_id>/users/<org_user_id>")]
@@ -551,9 +561,10 @@ fn edit_user(org_id: String, org_user_id: String, data: JsonUpcase<EditUserData>
         }
     }
 
-    user_to_edit.save(&conn);
-
-    Ok(())
+    match user_to_edit.save(&conn) {
+        Ok(()) => Ok(()),
+        Err(_) => err!("Failed to save user data")
+    }
 }
 
 #[delete("/organizations/<org_id>/users/<org_user_id>")]
```
```diff
@@ -75,9 +75,10 @@ fn recover(data: JsonUpcase<RecoverTwoFactor>, conn: DbConn) -> JsonResult {
 
     // Remove the recovery code, not needed without twofactors
     user.totp_recover = None;
-    user.save(&conn);
-
-    Ok(Json(json!({})))
+    match user.save(&conn) {
+        Ok(()) => Ok(Json(json!({}))),
+        Err(_) => err!("Failed to remove the user's two factor recovery code")
+    }
 }
 
 #[derive(Deserialize)]
@@ -217,7 +218,9 @@ fn _generate_recover_code(user: &mut User, conn: &DbConn) {
     if user.totp_recover.is_none() {
         let totp_recover = BASE32.encode(&crypto::get_random(vec![0u8; 20]));
         user.totp_recover = Some(totp_recover);
-        user.save(conn);
+        if user.save(conn).is_err() {
+            println!("Error: Failed to save the user's two factor recovery code")
+        }
     }
 }
 
@@ -249,27 +252,34 @@ fn generate_u2f(data: JsonUpcase<PasswordData>, headers: Headers, conn: DbConn)
     let user_uuid = &headers.user.uuid;
 
     let u2f_type = TwoFactorType::U2f as i32;
-    let register_type = TwoFactorType::U2fRegisterChallenge;
-    let (enabled, challenge) = match TwoFactor::find_by_user_and_type(user_uuid, u2f_type, &conn) {
-        Some(_) => (true, String::new()),
-        None => {
-            let c = _create_u2f_challenge(user_uuid, register_type, &conn);
-            (false, c.challenge)
-        }
-    };
+    let enabled = TwoFactor::find_by_user_and_type(user_uuid, u2f_type, &conn).is_some();
 
     Ok(Json(json!({
         "Enabled": enabled,
-        "Challenge": {
-            "UserId": headers.user.uuid,
-            "AppId": APP_ID.to_string(),
-            "Challenge": challenge,
-            "Version": U2F_VERSION,
-        },
         "Object": "twoFactorU2f"
     })))
 }
 
+#[post("/two-factor/get-u2f-challenge", data = "<data>")]
+fn generate_u2f_challenge(data: JsonUpcase<PasswordData>, headers: Headers, conn: DbConn) -> JsonResult {
+    let data: PasswordData = data.into_inner().data;
+
+    if !headers.user.check_valid_password(&data.MasterPasswordHash) {
+        err!("Invalid password");
+    }
+
+    let user_uuid = &headers.user.uuid;
+
+    let challenge = _create_u2f_challenge(user_uuid, TwoFactorType::U2fRegisterChallenge, &conn).challenge;
+
+    Ok(Json(json!({
+        "UserId": headers.user.uuid,
+        "AppId": APP_ID.to_string(),
+        "Challenge": challenge,
+        "Version": U2F_VERSION,
+    })))
+}
+
 #[derive(Deserialize, Debug)]
 #[allow(non_snake_case)]
 struct EnableU2FData {
```
```diff
@@ -46,16 +46,17 @@ fn _refresh_login(data: &ConnectData, _device_type: DeviceType, conn: DbConn) ->
     let orgs = UserOrganization::find_by_user(&user.uuid, &conn);
 
     let (access_token, expires_in) = device.refresh_tokens(&user, orgs);
-    device.save(&conn);
-
-    Ok(Json(json!({
-        "access_token": access_token,
-        "expires_in": expires_in,
-        "token_type": "Bearer",
-        "refresh_token": device.refresh_token,
-        "Key": user.key,
-        "PrivateKey": user.private_key,
-    })))
+    match device.save(&conn) {
+        Ok(()) => Ok(Json(json!({
+            "access_token": access_token,
+            "expires_in": expires_in,
+            "token_type": "Bearer",
+            "refresh_token": device.refresh_token,
+            "Key": user.key,
+            "PrivateKey": user.private_key,
+        }))),
+        Err(_) => err!("Failed to add device to user")
+    }
 }
 
 fn _password_login(data: &ConnectData, device_type: DeviceType, conn: DbConn, remote: Option<SocketAddr>) -> JsonResult {
@@ -128,7 +129,9 @@ fn _password_login(data: &ConnectData, device_type: DeviceType, conn: DbConn, re
     let orgs = UserOrganization::find_by_user(&user.uuid, &conn);
 
     let (access_token, expires_in) = device.refresh_tokens(&user, orgs);
-    device.save(&conn);
+    if device.save(&conn).is_err() {
+        err!("Failed to add device to user")
+    }
 
     let mut result = json!({
         "access_token": access_token,
```
```diff
@@ -1,5 +1,6 @@
 use rocket::Route;
 use rocket_contrib::Json;
+use serde_json::Value as JsonValue;
 
 use api::JsonResult;
 use auth::Headers;
@@ -22,17 +23,20 @@ fn negotiate(_headers: Headers, _conn: DbConn) -> JsonResult {
     use data_encoding::BASE64URL;
 
     let conn_id = BASE64URL.encode(&crypto::get_random(vec![0u8; 16]));
+    let mut available_transports: Vec<JsonValue> = Vec::new();
+
+    if CONFIG.websocket_enabled {
+        available_transports.push(json!({"transport":"WebSockets", "transferFormats":["Text","Binary"]}));
+    }
 
     // TODO: Implement transports
     // Rocket WS support: https://github.com/SergioBenitez/Rocket/issues/90
     // Rocket SSE support: https://github.com/SergioBenitez/Rocket/issues/33
+    // {"transport":"ServerSentEvents", "transferFormats":["Text"]},
+    // {"transport":"LongPolling", "transferFormats":["Text","Binary"]}
     Ok(Json(json!({
         "connectionId": conn_id,
-        "availableTransports":[
-            {"transport":"WebSockets", "transferFormats":["Text","Binary"]},
-            // {"transport":"ServerSentEvents", "transferFormats":["Text"]},
-            // {"transport":"LongPolling", "transferFormats":["Text","Binary"]}
-        ]
+        "availableTransports": available_transports
     })))
 }
 
@@ -356,12 +360,14 @@ pub fn start_notification_server() -> WebSocketUsers {
     let factory = WSFactory::init();
     let users = factory.users.clone();
 
-    thread::spawn(move || {
-        WebSocket::new(factory)
-            .unwrap()
-            .listen(&CONFIG.websocket_url)
-            .unwrap();
-    });
+    if CONFIG.websocket_enabled {
+        thread::spawn(move || {
+            WebSocket::new(factory)
+                .unwrap()
+                .listen(&CONFIG.websocket_url)
+                .unwrap();
+        });
+    }
 
     users
 }
```
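The `start_notification_server` hunk gates the WebSocket listener behind the new `websocket_enabled` flag, so deployments that never set `WEBSOCKET_ENABLED` do not spawn the extra thread or bind port 3012. A standalone sketch of that guard, with a simulated listener standing in for `ws::WebSocket` and `CONFIG`:

```rust
// Standalone sketch of a feature-flag guard around a background listener.
// The boolean argument and the sleeping closure are illustrative stand-ins
// for CONFIG.websocket_enabled and the blocking WebSocket::listen call.
use std::thread;
use std::time::Duration;

fn start_notification_server(websocket_enabled: bool) -> &'static str {
    if websocket_enabled {
        thread::spawn(|| {
            // The real code blocks here serving WebSocket clients; we just
            // simulate a little work so the thread has something to do.
            thread::sleep(Duration::from_millis(10));
        });
        "websocket listener spawned"
    } else {
        "websockets disabled, no extra thread"
    }
}

fn main() {
    println!("{}", start_notification_server(false));
    println!("{}", start_notification_server(true));
}
```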
```diff
@@ -112,15 +112,11 @@ use db::schema::devices;
 
 /// Database methods
 impl Device {
-    pub fn save(&mut self, conn: &DbConn) -> bool {
+    pub fn save(&mut self, conn: &DbConn) -> QueryResult<()> {
         self.updated_at = Utc::now().naive_utc();
 
-        match diesel::replace_into(devices::table)
-            .values(&*self)
-            .execute(&**conn) {
-            Ok(1) => true, // One row inserted
-            _ => false,
-        }
+        diesel::replace_into(devices::table)
+            .values(&*self).execute(&**conn).and(Ok(()))
     }
 
     pub fn delete(self, conn: &DbConn) -> QueryResult<()> {
@@ -70,16 +70,12 @@ use db::schema::{folders, folders_ciphers};
 
 /// Database methods
 impl Folder {
-    pub fn save(&mut self, conn: &DbConn) -> bool {
+    pub fn save(&mut self, conn: &DbConn) -> QueryResult<()> {
         User::update_uuid_revision(&self.user_uuid, conn);
         self.updated_at = Utc::now().naive_utc();
 
-        match diesel::replace_into(folders::table)
-            .values(&*self)
-            .execute(&**conn) {
-            Ok(1) => true, // One row inserted
-            _ => false,
-        }
+        diesel::replace_into(folders::table)
+            .values(&*self).execute(&**conn).and(Ok(()))
     }
 
     pub fn delete(&self, conn: &DbConn) -> QueryResult<()> {
@@ -137,9 +137,9 @@ use db::schema::{organizations, users_organizations, users_collections, ciphers_
 
 /// Database methods
 impl Organization {
-    pub fn save(&mut self, conn: &DbConn) -> bool {
+    pub fn save(&mut self, conn: &DbConn) -> QueryResult<()> {
         if self.uuid == Organization::VIRTUAL_ID {
-            return false
+            return Err(diesel::result::Error::NotFound)
         }
 
         UserOrganization::find_by_org(&self.uuid, conn)
@@ -148,12 +148,8 @@ impl Organization {
             User::update_uuid_revision(&user_org.user_uuid, conn);
         });
 
-        match diesel::replace_into(organizations::table)
-            .values(&*self)
-            .execute(&**conn) {
-            Ok(1) => true, // One row inserted
-            _ => false,
-        }
+        diesel::replace_into(organizations::table)
+            .values(&*self).execute(&**conn).and(Ok(()))
     }
 
     pub fn delete(self, conn: &DbConn) -> QueryResult<()> {
@@ -266,18 +262,14 @@ impl UserOrganization {
         })
     }
 
-    pub fn save(&mut self, conn: &DbConn) -> bool {
+    pub fn save(&mut self, conn: &DbConn) -> QueryResult<()> {
         if self.org_uuid == Organization::VIRTUAL_ID {
-            return false
+            return Err(diesel::result::Error::NotFound)
        }
         User::update_uuid_revision(&self.user_uuid, conn);
 
-        match diesel::replace_into(users_organizations::table)
-            .values(&*self)
-            .execute(&**conn) {
-            Ok(1) => true, // One row inserted
-            _ => false,
-        }
+        diesel::replace_into(users_organizations::table)
+            .values(&*self).execute(&**conn).and(Ok(()))
     }
 
     pub fn delete(self, conn: &DbConn) -> QueryResult<()> {
@@ -148,15 +148,11 @@ impl User {
     }
 
 
-    pub fn save(&mut self, conn: &DbConn) -> bool {
+    pub fn save(&mut self, conn: &DbConn) -> QueryResult<()> {
         self.updated_at = Utc::now().naive_utc();
 
-        match diesel::replace_into(users::table) // Insert or update
-            .values(&*self)
-            .execute(&**conn) {
-            Ok(1) => true, // One row inserted
-            _ => false,
-        }
+        diesel::replace_into(users::table) // Insert or update
+            .values(&*self).execute(&**conn).and(Ok(()))
     }
 
     pub fn delete(self, conn: &DbConn) -> QueryResult<()> {
```
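All of the model hunks above collapse the old `match ... { Ok(1) => true, _ => false }` into `.execute(&**conn).and(Ok(()))`. `Result::and` keeps any `Err` untouched and otherwise swaps in the provided `Ok` value, which is what turns Diesel's `QueryResult<usize>` row count into the `QueryResult<()>` the new signatures promise. A tiny standalone illustration using plain `std` types in place of Diesel's:

```rust
// Quick illustration of why `.execute(&**conn).and(Ok(()))` yields a
// Result<(), E>: Result::and preserves an Err and otherwise replaces the Ok
// payload, so the affected-row count is discarded while errors propagate.
fn main() {
    // Successful execute(): the row count is replaced by ().
    let rows: Result<usize, String> = Ok(1);
    assert_eq!(rows.and(Ok(())), Ok(()));

    // Failed execute(): the error is preserved as-is.
    let failed: Result<usize, String> = Err("database is locked".to_string());
    assert_eq!(failed.and(Ok(())), Err("database is locked".to_string()));

    println!("Result::and behaves as expected");
}
```

One behavioural nuance of the rewrite: the old code treated anything other than exactly one affected row as a failure, while `.and(Ok(()))` maps every `Ok` row count to success and only propagates real database errors.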
```diff
@@ -232,6 +232,7 @@ pub struct Config {
     web_vault_folder: String,
     web_vault_enabled: bool,
 
+    websocket_enabled: bool,
     websocket_url: String,
 
     local_icon_extractor: bool,
@@ -269,6 +270,7 @@ impl Config {
             web_vault_folder: get_env_or("WEB_VAULT_FOLDER", "web-vault/".into()),
             web_vault_enabled: get_env_or("WEB_VAULT_ENABLED", true),
 
+            websocket_enabled: get_env_or("WEBSOCKET_ENABLED", false),
             websocket_url: format!("{}:{}", get_env_or("WEBSOCKET_ADDRESS", "0.0.0.0".to_string()), get_env_or("WEBSOCKET_PORT", 3012)),
 
             local_icon_extractor: get_env_or("LOCAL_ICON_EXTRACTOR", false),
```
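For reference, the new websocket options compose as sketched below, assuming the defaults shown in the hunk above (`WEBSOCKET_ADDRESS` `0.0.0.0`, `WEBSOCKET_PORT` `3012`, `WEBSOCKET_ENABLED` off). A plain `std::env` helper stands in for the project's `get_env_or`, whose exact signature is not shown here.

```rust
// Minimal sketch of how WEBSOCKET_* environment variables combine into the
// websocket settings. `env_or` is an illustrative stand-in, not the
// project's get_env_or helper.
use std::env;

fn env_or(key: &str, default: &str) -> String {
    env::var(key).unwrap_or_else(|_| default.to_string())
}

fn main() {
    let websocket_enabled = env_or("WEBSOCKET_ENABLED", "false") == "true";
    let websocket_url = format!(
        "{}:{}",
        env_or("WEBSOCKET_ADDRESS", "0.0.0.0"),
        env_or("WEBSOCKET_PORT", "3012")
    );

    // With no overrides this prints "0.0.0.0:3012", and the WebSocket
    // listener stays disabled until WEBSOCKET_ENABLED=true is set.
    println!("enabled={} url={}", websocket_enabled, websocket_url);
}
```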