Compare commits

...

149 Commits
1.0.0 ... 1.4.0

Author SHA1 Message Date
Daniel García
dd00591082 Add info about how to fix #176 2018-11-09 16:24:45 +01:00
Daniel García
1e9dd2fd4e Merge pull request #244 from RomanHargrave/multiple-u2f
Make U2F work with vault 2.4.0 changes
2018-11-09 15:47:29 +01:00
Roman Hargrave
62bc58e145 Clean up after u2f endpoint split 2018-11-09 00:27:43 -06:00
Roman Hargrave
760e0ab805 Initial u2f fix 2018-11-09 00:00:31 -06:00
Daniel García
8f5bfe7938 Merge pull request #240 from mprasil/balenalib
Switch from resin to balenalib
2018-11-03 14:48:38 +01:00
Miroslav Prasil
b359df7045 Switch from resin to balenalib 2018-11-03 10:25:15 +01:00
Daniel García
f1b1000600 Merge pull request #239 from mprasil/admin_warning
Improve the admin account warning.
2018-11-01 19:03:05 +01:00
Miroslav Prasil
c0e248c457 Improve the admin account warning. 2018-11-01 18:54:33 +01:00
mprasil
f510a1b060 Merge pull request #236 from xMateMCPE/patch-1
Grammar fixes to README.md
2018-10-28 23:06:10 +00:00
xMateMCPE
fafc3883c6 Update README.md 2018-10-28 22:32:32 +00:00
Daniel García
1bdb98d139 Merge pull request #234 from icicimov/kubernetes-aws
Add Kubernetes deployment reference
2018-10-28 16:36:42 +01:00
icicimov
2f5ca88fb1 Readme typo fix 2018-10-28 18:16:22 +11:00
icicimov
e7a24159c5 Add Kubernetes deployment reference with proper section and short description 2018-10-27 12:42:56 +11:00
icicimov
e056cc8178 Add Kubernetes deployment reference 2018-10-26 22:37:35 +11:00
Daniel García
8ce4c79612 Merge pull request #233 from Toucan-Sam/patch-2
Update PROXY.md
2018-10-26 00:37:34 +02:00
Toucan-Sam
77d9641323 Update PROXY.md
Removed: `include conf.d/proxy-confs/proxy.conf;` lines, because they're specific to one user's setup (shauder) and will break nginx if copy-pasted where they don't exist.
Changed: Moved the listen value and server_name to the top, as is standard for nginx configs.
Changed: Commented out the SSL config, as it's specific to one user's setup (shauder) and will break if copy-pasted where it doesn't exist. It is still useful and a good idea for simplifying the nginx config.
Changed: Rearranged location blocks because OCD. First /, then /notifications/hub, then /notifications/hub/negotiate, because it looks nicer in a tree where each location grows.
2018-10-25 19:45:22 +13:00
Daniel García
31e4237247 Merge pull request #231 from janost/fix-save-equivalent-domains
Accept PUT and POST on /settings/domains, returns JsonResult, fixes saving Custom Equivalent Domains
2018-10-23 01:30:28 +02:00
janost
c32c65d367 Accept PUT and POST on /settings/domains, returns JsonResult, fixes saving Custom Equivalent Domains 2018-10-23 00:32:43 +02:00
Daniel García
0a4dbaf307 Merge pull request #226 from janost/fix-sync-without-query-string
Fix /sync without query string
2018-10-19 01:34:32 +02:00
janost
daa66b08dc Fix /sync without query string 2018-10-19 00:54:40 +02:00
Daniel García
d613fa1e68 Merge pull request #225 from janost/sync-exclude-domains
Don't send Domains if excludeDomains=true on /sync
2018-10-17 23:49:09 +02:00
janost
55fbd8d468 Don't send Domains if excludeDomains=true on /sync 2018-10-17 23:22:07 +02:00
Daniel García
adf40291e8 Update web vault to 2.4.0 2018-10-16 16:20:56 +02:00
Daniel García
acfc900997 Merge pull request #224 from fbartels/patch-2
refine backup instructions
2018-10-15 22:56:16 +02:00
Felix Bartels
0a08b1afc8 refine backup instructions
as mentioned in https://github.com/dani-garcia/bitwarden_rs/issues/223
2018-10-15 21:47:22 +02:00
Daniel García
eb48a3fac2 Merge pull request #221 from mprasil/ws_disable
Disable WebSockets negotiation by default
2018-10-15 16:20:07 +02:00
Miroslav Prasil
2e7fa6440b Do not spawn WS thread if it's disabled 2018-10-15 15:08:15 +01:00
Miroslav Prasil
9ecc98c3cc Disable WebSockets negotiation by default 2018-10-14 23:25:16 +01:00
Daniel García
02fd68d63b Merge pull request #218 from janost/refactor-folder-save
Folder::save() should return QueryResult instead of bool
2018-10-14 20:07:27 +02:00
Daniel García
235bce1ecb Merge pull request #220 from janost/refactor-user-save
User::save() should return QueryResult instead of bool
2018-10-14 20:07:04 +02:00
janost
e985221b50 User::save() should return QueryResult instead of bool 2018-10-14 19:33:12 +02:00
janost
77cf63c06d Folder::save() should return QueryResult instead of bool 2018-10-14 18:25:04 +02:00
Daniel García
faec050a6d Merge pull request #217 from janost/refactor-device-save
Device::save() should return QueryResult instead of bool
2018-10-14 17:35:59 +02:00
Daniel García
22304f4925 Merge pull request #219 from janost/refactor-organization-save
Organization::save() and UserOrganization::save() should return QueryResult instead of bool
2018-10-14 17:35:30 +02:00
janost
58a78ffa54 Device::save() should return QueryResult instead of bool 2018-10-14 16:17:37 +02:00
janost
64f6c60bfd Organization::save() and UserOrganization::save() should return QueryResult instead of bool 2018-10-14 16:04:23 +02:00
Daniel García
e0614620ef Merge pull request #216 from mprasil/superuser
Implement poor man's admin panel
2018-10-13 17:16:06 +02:00
Miroslav Prasil
a28caa33ef Implement poor man's admin panel 2018-10-12 15:20:10 +01:00
Daniel García
ce4fedf191 Change error response to be closer to upstream 2018-10-10 20:37:04 +02:00
Daniel García
f2078a3849 Merge pull request #213 from janost/refactor-collectioncipher-save-delete
CollectionCipher::save() and delete() should return QueryResult instead of bool
2018-10-07 16:06:47 +02:00
janost
5292d38c73 CollectionCipher::save() and delete() should return QueryResult instead of bool 2018-10-07 11:06:11 +02:00
Daniel García
1049646e27 Merge pull request #210 from janost/cipher-save-refactor
Cipher::save() should return QueryResult instead of bool
2018-10-06 16:08:04 +02:00
janost
380cf06211 Cipher::save() should return QueryResult instead of bool 2018-10-06 14:13:49 +02:00
mprasil
1f35ef2865 Merge pull request #209 from janost/resend-invitation-error-message
Return proper error message for org reinvite
2018-10-05 23:21:08 +01:00
janost
c29bc9309a Return proper error message for org reinvite 2018-10-05 12:29:41 +02:00
Daniel García
7112c86471 Updated dependencies, removed valid mail check (now done by lettre), and updated global domains file 2018-10-04 00:01:04 +02:00
Daniel García
2aabf14372 Merge pull request #206 from mprasil/collection_revision
Collection update updates User revision
2018-10-01 19:31:43 +02:00
Daniel García
77ff9c91c5 Merge pull request #207 from mprasil/continuation_token
Add continuation token when we return object list
2018-10-01 19:31:11 +02:00
Miroslav Prasil
d9457e929c Add continuation token when we return object list 2018-10-01 17:55:48 +01:00
Miroslav Prasil
86b49856a7 Handle return value from Collection::save() 2018-10-01 17:50:31 +01:00
Miroslav Prasil
54f54ee845 Update revision for users on collection save 2018-10-01 17:04:15 +01:00
Daniel García
015bd28cc2 Merge pull request #201 from mprasil/aarch64
Add dockerfile for aarch64 (arm64)
2018-10-01 17:40:20 +02:00
Daniel García
990c83a037 Merge pull request #204 from mprasil/org_improvements
Org improvements
2018-10-01 17:40:14 +02:00
Miroslav Prasil
c3c74506a7 Add missing fields to returned Org json 2018-10-01 16:00:11 +01:00
Miroslav Prasil
fb4e6bab14 Clean up the share_collection handling 2018-10-01 15:59:10 +01:00
Miroslav Prasil
fe38f95f15 Add dockerfile for aarch64 (arm64) 2018-09-28 13:01:47 +01:00
Daniel García
9eaa9c1a17 Add WEBSOCKET_ADDRESS config option (Fixes #199) 2018-09-28 13:50:04 +02:00
Daniel García
8ee681c4a3 Merge pull request #200 from mprasil/copy_fix
Remove unnecessary copy from armv7 Dockerfile
2018-09-28 13:31:40 +02:00
Miroslav Prasil
08aee97c1d Remove unnecessary copy from armv7 Dockerfile 2018-09-28 09:49:10 +01:00
Daniel García
2bb6482bec Merge branch 'openssl-fix' 2018-09-20 22:52:58 +02:00
Daniel García
c169095128 Update dependencies to point to upstream lettre 2018-09-20 22:45:19 +02:00
Daniel García
b1397c95ca Remove unnecessary path in PROXY.md 2018-09-19 22:33:12 +02:00
Daniel García
3df31e3464 Temp fix for OpenSSL 1.1.1 compatibility 2018-09-19 21:45:50 +02:00
Daniel García
638a0fd3c3 Updated dependencies 2018-09-19 21:43:03 +02:00
Daniel García
ebb66c374e Implement KDF iterations change (Fixes #195) 2018-09-19 17:30:14 +02:00
Daniel García
89e3c41043 Merge pull request #191 from mprasil/vault_2.3.0
Update Vault to v2.3.0
2018-09-18 16:03:10 +02:00
Miroslav Prasil
3da410ef71 Update Vault to v2.3.0 2018-09-18 13:53:25 +01:00
Daniel García
2dccbd3412 Merge pull request #190 from mprasil/invite_readme
Update the Invitation workflow documentation
2018-09-18 14:36:39 +02:00
Daniel García
2ff529ed99 Merge pull request #189 from mprasil/delete_fix
Add alias for DELETE call on accounts
2018-09-18 14:36:31 +02:00
Miroslav Prasil
4fae1e4298 Update the Invitation workflow documentation 2018-09-18 11:49:20 +01:00
Miroslav Prasil
f7951b44ba Add alias for DELETE call on accounts 2018-09-18 11:13:45 +01:00
Daniel García
ff8eeff995 Merge pull request #184 from mprasil/code_block_fix
Fixed code block, added some formatting
2018-09-16 15:44:05 +02:00
Miroslav Prasil
00019dc356 Added some formatting and a link 2018-09-16 12:36:08 +01:00
Miroslav Prasil
404fe5321e Fixed code block 2018-09-16 12:27:29 +01:00
Daniel García
e7dd239d20 Merge pull request #182 from dobunzli/master
Update README.md
2018-09-15 20:03:58 +02:00
Daniel García
071f3370e3 Merge pull request #183 from jkaberg/traefik_example_proxy
traefik proxy example
2018-09-15 20:03:34 +02:00
Joel Kåberg
ee321be579 traefik example 2018-09-14 23:22:38 +02:00
dobunzli
eb61425da5 Update README.md
Added info about enabling https when the software used for getting certs uses symlinks
2018-09-14 22:39:58 +02:00
Daniel García
b75ba216d1 Return default prelogin values when the user doesn't exist 2018-09-13 23:04:52 +02:00
Daniel García
8651df8c2a Fixed some lint issues 2018-09-13 21:55:23 +02:00
Daniel García
948554a20f Added config option for websocket port, and reworked the config parsing a bit.
Added SMTP_FROM config to examples and made it mandatory, it doesn't make much sense to not specify the from address.
2018-09-13 20:59:51 +02:00
Daniel García
9cdb605659 Include more proxy examples 2018-09-13 17:08:16 +02:00
Daniel García
928e2424c0 Updated dependencies and fixed errors 2018-09-13 16:05:13 +02:00
Daniel García
a01fee0b9f Merge branch 'ws'
# Conflicts:
#	Cargo.toml
#	src/api/core/ciphers.rs
#	src/main.rs
2018-09-13 15:59:45 +02:00
Daniel García
924e4a17e5 Merge pull request #175 from stammw/master
Documentation for SMTP and password hint configuration
2018-09-13 15:46:52 +02:00
Daniel García
fdbd73c716 Merge branch 'master' into master 2018-09-13 15:39:28 +02:00
Daniel García
f397f0cbd0 Implement organization import for admins and owners (Fixes #178) 2018-09-13 15:16:24 +02:00
Daniel García
4d2c6e39b2 Merge pull request #177 from mprasil/raspberry
Add Dockerfile for Raspberry Pi
2018-09-13 00:19:15 +02:00
Daniel García
3e1afb139c Remove unnecessary return 2018-09-12 23:58:02 +02:00
Jean-Christophe BEGUE
af69606bea Documentation for SMTP and password hint configuration 2018-09-12 21:19:29 +02:00
Miroslav Prasil
bc8ff14695 Fix the binary path 2018-09-12 13:51:43 +01:00
Miroslav Prasil
5f7b220eb4 Initial shot at cross compilation 2018-09-12 12:15:26 +01:00
Daniel García
67adfee5e5 Some documentation 2018-09-11 17:27:04 +02:00
Daniel García
d66d4fd87f Add error message when the proxy doesn't route websockets correctly 2018-09-11 17:09:33 +02:00
Daniel García
1b20a25514 Merge pull request #173 from mprasil/poormans_invites
Implement poor man's invitation via Organization invitation
2018-09-11 16:48:56 +02:00
Miroslav Prasil
c1cd4d9a6b Modify User::new to be keyless and passwordless 2018-09-11 14:25:12 +01:00
Daniel García
b63693aefb Merge pull request #137 from stammw/master
SMTP implementation, along with password HINT email
2018-09-11 14:58:09 +02:00
Miroslav Prasil
ec05f14f5a Implement poor man's invitation via Organization invitation 2018-09-11 13:09:59 +01:00
Jean-Christophe BEGUE
37d88be2be Return an error when the email address for the password hint is not valid 2018-09-11 13:12:24 +02:00
Jean-Christophe BEGUE
1c641d7635 Special messages when user has no password hint 2018-09-11 13:04:34 +02:00
Jean-Christophe BEGUE
e2ab2f7306 Save None instead of empty password hint 2018-09-11 13:00:59 +02:00
Daniel García
434551e012 Merge pull request #171 from shauder/ws
Expose 3012 in docker build file for notifications
2018-09-04 21:18:16 +02:00
Daniel García
69dcbdd3b2 Merge branch 'master' into ws 2018-09-04 17:46:38 +02:00
Daniel García
8df6f79f19 Merge pull request #170 from mprasil/org-user-edit
Fix editing users in Organization
2018-09-04 17:32:16 +02:00
Shane A. Faulkner
422f7ccfa8 Expose 3012 in docker build file for notifications 2018-09-04 10:22:17 -05:00
Miroslav Prasil
c58682e3fb Fix the logic in user editing 2018-09-04 16:10:26 +01:00
Miroslav Prasil
db111ae2a0 Check properly the user membership in Organization 2018-09-04 13:37:44 +01:00
Miroslav Prasil
049aa33f17 Fix editing users in Organization 2018-09-04 12:15:46 +01:00
Daniel García
b1ac37609f Merge pull request #169 from mprasil/http_warning
Add info on running over HTTP (documentation for #153)
2018-09-03 13:47:05 +02:00
Miroslav Prasil
53e8f78af6 Link to the https setup 2018-09-03 10:59:59 +01:00
Miroslav Prasil
1bced97e04 Add info on running over HTTP (documentation for #153) 2018-09-03 10:53:52 +01:00
Daniel García
f8ae5013cb Merge pull request #167 from shauder/ws
Add support for cipher update notifications
2018-09-02 00:17:40 +02:00
Shane A. Faulkner
d8e5e53273 Add notifications for cipher delete and create 2018-09-01 10:59:13 -05:00
Shane A. Faulkner
b6502e9e9d Add support for CipherUpdate notifications 2018-08-31 23:30:53 -05:00
Daniel García
d70864ac73 Initial version of websockets notification support.
For now only folder notifications are sent (create, rename, delete).
The notifications are only tested between two web-vault sessions in different browsers; mobile apps and browser extensions are untested.

The websocket server is exposed on port 3012, while the rocket server is exposed on another port (8000 by default). To make notifications work, both should be accessible on the same port, which requires a reverse proxy.

My testing is done with Caddy server, and the following config:

```
localhost {

    # The negotiation endpoint is also proxied to Rocket
    proxy /notifications/hub/negotiate 0.0.0.0:8000 {
        transparent
    }

    # Notifications redirected to the websockets server
    proxy /notifications/hub 0.0.0.0:3012 {
        websocket
    }

    # Proxy the Root directory to Rocket
    proxy / 0.0.0.0:8000 {
        transparent
    }
}
```

This exposes the service on port 2015.
2018-08-30 17:58:53 +02:00
Daniel García
f94e626021 Merge pull request #166 from mprasil/alpine
Alpine
2018-08-30 16:47:58 +02:00
Daniel García
0a3b84b815 Merge pull request #165 from mprasil/shared_edit_fix
Fix editing shared cipher (fixes #164)
2018-08-30 16:47:08 +02:00
Miroslav Prasil
d336d89b83 Fix editing shared cipher (fixes #164) 2018-08-30 11:12:29 +01:00
Miroslav Prasil
1a5c1979e3 Move Alpine Dockerfile to separate file 2018-08-30 10:38:38 +01:00
Miroslav Prasil
cec9566d2a Merge branch 'master' into alpine 2018-08-29 15:06:50 +01:00
Baelyk
fe473b9e75 Attachment::save() returns Result instead of bool (#161)
Returning a result instead of a bool as per #6
2018-08-29 15:22:19 +02:00
mprasil
062ae4dd59 Allow non-Admin user to share to collection (fixes #157) (#159)
* Allow non-Admin user to share to collection (fixes #157)

* Better handling of collection sharing
2018-08-29 15:22:03 +02:00
Miroslav Prasil
45d676eb10 Merge branch 'master' into alpine 2018-08-29 10:07:09 +01:00
mprasil
3cfdf9b585 Add DELETE handlers for cipher and attachment deletion (fixes #158) (#160) 2018-08-29 00:48:53 +02:00
Miroslav Prasil
08b551624c Merge branch 'master' into alpine 2018-08-28 14:06:54 +01:00
Daniel García
761a0a3393 Removed accidental change to Dockerfile 2018-08-28 12:54:57 +02:00
Daniel García
6660b0aef3 Updated web vault to version 2.2 2018-08-28 03:22:13 +02:00
Kumar Ankur
781056152a Support password history #155 (#156)
* Password History Support (#155)

* down.sql logic not required as per review comments
2018-08-27 23:08:58 +02:00
Miroslav Prasil
6822bb28a0 Merge branch 'master' into alpine 2018-08-26 16:58:46 +01:00
Daniel García
b82710eecf Merge pull request #152 from Baelyk/master
Add ip and username to failed login attempts
2018-08-26 17:43:50 +02:00
Baelyk
c386b3bcf7 Add IP and Username to failed login attempts
Resolves #119
2018-08-25 17:07:59 -05:00
Miroslav Prasil
ffec0b065b Updated build image version 2018-08-25 09:29:50 +01:00
Miroslav Prasil
5b7fe9f155 Merge branch 'master' into alpine 2018-08-24 23:17:52 +01:00
Daniel García
8d1ee859f2 Implemented basic support for prelogin and notification negotiation 2018-08-24 19:02:34 +02:00
Daniel García
c91f80c456 Fixed rust toolchain date 2018-08-24 17:12:04 +02:00
Daniel García
39891e86a0 Updated dependencies, added Travis CI integration and some badges 2018-08-24 17:07:11 +02:00
Miroslav Prasil
575f701390 Merge branch 'master' into alpine 2018-08-23 21:59:23 +01:00
Daniel García
335099cd30 Merge pull request #150 from mprasil/build_instructions
Update the build instruction for new Vault
2018-08-23 16:05:24 +02:00
Miroslav Prasil
9fad541c87 Clone repository instead of downloading as suggested by @mqus 2018-08-23 12:08:54 +01:00
Miroslav Prasil
007e053e2f Update the build instruction for new Vault 2018-08-23 11:06:32 +01:00
Miroslav Prasil
ef2413a5aa Fix SSL issue, rm cache 2018-08-21 22:08:16 +01:00
Miroslav Prasil
ca8e1c646d Update build image 2018-08-21 22:08:16 +01:00
Miroslav Prasil
346c7630c9 Initial implementation of musl build on top of Alpine 2018-08-21 22:08:16 +01:00
Jean-Christophe BEGUE
9e63985b28 Check email validity before using it for password hint sending 2018-08-16 21:25:28 +02:00
Jean-Christophe BEGUE
401aa7c699 Make SMTP authentication optional, let lettre pick the best auth mechanism 2018-08-15 17:21:19 +02:00
Jean-Christophe BEGUE
d68f57cbba Fix password hint showing logic 2018-08-15 14:08:00 +02:00
Jean-Christophe BEGUE
19e0605d30 Better message into the password hint email 2018-08-15 10:17:05 +02:00
Jean-Christophe BEGUE
812387e586 SMTP integration, send password hint by email. 2018-08-15 08:45:18 +02:00
Jean-Christophe BEGUE
f7ffb81d9e SMTP configuration parsing and checking 2018-08-13 13:46:32 +02:00
43 changed files with 3105 additions and 1033 deletions

.env (13 changed lines)

@@ -14,6 +14,10 @@
 # WEB_VAULT_FOLDER=web-vault/
 # WEB_VAULT_ENABLED=true

+## Controls the WebSocket server address and port
+# WEBSOCKET_ADDRESS=0.0.0.0
+# WEBSOCKET_PORT=3012
+
 ## Controls if new users can register
 # SIGNUPS_ALLOWED=true

@@ -41,3 +45,12 @@
 # ROCKET_ADDRESS=0.0.0.0 # Enable this to test mobile app
 # ROCKET_PORT=8000
 # ROCKET_TLS={certs="/path/to/certs.pem",key="/path/to/key.pem"}
+
+## Mail specific settings, set SMTP_HOST and SMTP_FROM to enable the mail service.
+## Note: if SMTP_USERNAME is specified, SMTP_PASSWORD is mandatory
+# SMTP_HOST=smtp.domain.tld
+# SMTP_FROM=bitwarden-rs@domain.tld
+# SMTP_PORT=587
+# SMTP_SSL=true
+# SMTP_USERNAME=username
+# SMTP_PASSWORD=password

.travis.yml (new file, 7 lines)

@@ -0,0 +1,7 @@
# Copied from Rocket's .travis.yml
language: rust
sudo: required # so we get a VM with higher specs
dist: trusty # so we get a VM with higher specs
cache: cargo
rust:
- nightly

BUILD.md

@@ -17,28 +17,29 @@ cargo build --release
 When run, the server is accessible in [http://localhost:80](http://localhost:80).

 ### Install the web-vault
-Download the latest official release from the [releases page](https://github.com/bitwarden/web/releases) and extract it.
-
-Modify `web-vault/settings.Production.json` to look like this:
-```json
-{
-  "appSettings": {
-    "apiUri": "/api",
-    "identityUri": "/identity",
-    "iconsUri": "/icons",
-    "stripeKey": "",
-    "braintreeKey": ""
-  }
-}
-```
-
-Then, run the following from the `web-vault` directory:
+Clone the git repository at [bitwarden/web](https://github.com/bitwarden/web) and checkout the latest release tag (e.g. v2.1.1):
+
 ```sh
-npm install
-npx gulp dist:selfHosted
+# clone the repository
+git clone https://github.com/bitwarden/web.git web-vault
+cd web-vault
+# switch to the latest tag
+git checkout "$(git tag | tail -n1)"
 ```
-Finally copy the contents of the `web-vault/dist` folder into the `bitwarden_rs/web-vault` folder.
+
+Apply the patch file from `docker/set-vault-baseurl.patch`:
+```sh
+# In the Vault repository directory
+git apply /path/to/bitwarden_rs/docker/set-vault-baseurl.patch
+```
+
+Then, build the Vault:
+```sh
+npm run sub:init
+npm install
+npm run dist
+```
+
+Finally copy the contents of the `build` folder into the `bitwarden_rs/web-vault` folder.

 # Configuration
 The available configuration options are documented in the default `.env` file, and they can be modified by uncommenting the desired options in that file or by setting their respective environment variables. Look at the README file for the main configuration options available.

Cargo.lock (generated, 1484 changed lines): file diff suppressed because it is too large.

Cargo.toml

@@ -5,36 +5,45 @@ authors = ["Daniel García <dani-garcia@users.noreply.github.com>"]

 [dependencies]
 # Web framework for nightly with a focus on ease-of-use, expressibility, and speed.
-rocket = { version = "0.3.15", features = ["tls"] }
-rocket_codegen = "0.3.15"
-rocket_contrib = "0.3.15"
+rocket = { version = "0.3.17", features = ["tls"] }
+rocket_codegen = "0.3.17"
+rocket_contrib = "0.3.17"

 # HTTP client
-reqwest = "0.8.6"
+reqwest = "0.9.2"

 # multipart/form-data support
-multipart = "0.15.0"
+multipart = "0.15.3"

+# WebSockets library
+ws = "0.7.8"
+
+# MessagePack library
+rmpv = "0.4.0"
+
+# Concurrent hashmap implementation
+chashmap = "2.2.0"
+
 # A generic serialization/deserialization framework
-serde = "1.0.70"
-serde_derive = "1.0.70"
-serde_json = "1.0.24"
+serde = "1.0.79"
+serde_derive = "1.0.79"
+serde_json = "1.0.31"

 # A safe, extensible ORM and Query builder
-diesel = { version = "1.3.2", features = ["sqlite", "chrono", "r2d2"] }
+diesel = { version = "1.3.3", features = ["sqlite", "chrono", "r2d2"] }
 diesel_migrations = { version = "1.3.0", features = ["sqlite"] }

 # Bundled SQLite
-libsqlite3-sys = { version = "0.9.1", features = ["bundled"] }
+libsqlite3-sys = { version = "0.9.3", features = ["bundled"] }

 # Crypto library
 ring = { version = "= 0.11.0", features = ["rsa_signing"] }

 # UUID generation
-uuid = { version = "0.6.5", features = ["v4"] }
+uuid = { version = "0.7.1", features = ["v4"] }

 # Date and time library for Rust
-chrono = "0.4.4"
+chrono = "0.4.6"

 # TOTP library
 oath = "0.10.2"

@@ -52,15 +61,26 @@ u2f = "0.1.2"
 dotenv = { version = "0.13.0", default-features = false }

 # Lazy static macro
-lazy_static = "1.0.2"
+lazy_static = "1.1.0"

 # Numerical libraries
-num-traits = "0.2.5"
-num-derive = "0.2.2"
+num-traits = "0.2.6"
+num-derive = "0.2.3"

+# Email libraries
+lettre = "0.9.0"
+lettre_email = "0.9.0"
+native-tls = "0.2.1"
+
+# Number encoding library
+byteorder = "1.2.6"

 [patch.crates-io]
 # Make jwt use ring 0.11, to match rocket
 jsonwebtoken = { path = "libs/jsonwebtoken" }
+rmp = { git = 'https://github.com/dani-garcia/msgpack-rust' }
+lettre = { git = 'https://github.com/lettre/lettre', rev = 'c988b1760ad81' }
+lettre_email = { git = 'https://github.com/lettre/lettre', rev = 'c988b1760ad81' }

 # Version 0.1.2 from crates.io lacks a commit that fixes a certificate error
 u2f = { git = 'https://github.com/wisespace-io/u2f-rs', rev = '193de35093a44' }

Dockerfile

@@ -4,7 +4,7 @@
 ####################### VAULT BUILD IMAGE #######################
 FROM node:8-alpine as vault

-ENV VAULT_VERSION "v2.1.1"
+ENV VAULT_VERSION "v2.4.0"

 ENV URL "https://github.com/bitwarden/web.git"

@@ -76,6 +76,7 @@ RUN apt-get update && apt-get install -y\
 RUN mkdir /data
 VOLUME /data
 EXPOSE 80
+EXPOSE 3012

 # Copies the files from the context (env file and web-vault)
 # and the binary from the "build" stage to the current stage

Dockerfile.aarch64 (new file, 112 lines)

@@ -0,0 +1,112 @@
# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
FROM node:8-alpine as vault
ENV VAULT_VERSION "v2.4.0"
ENV URL "https://github.com/bitwarden/web.git"
RUN apk add --update-cache --upgrade \
curl \
git \
tar
RUN git clone -b $VAULT_VERSION --depth 1 $URL web-build
WORKDIR /web-build
COPY /docker/set-vault-baseurl.patch /web-build/
RUN git apply set-vault-baseurl.patch
RUN npm run sub:init && npm install
RUN npm run dist \
&& mv build /web-vault
########################## BUILD IMAGE ##########################
# We need to use the Rust build image, because
# we need the Rust compiler and Cargo tooling
FROM rust as build
RUN apt-get update \
&& apt-get install -y \
gcc-aarch64-linux-gnu \
&& mkdir -p ~/.cargo \
&& echo '[target.aarch64-unknown-linux-gnu]' >> ~/.cargo/config \
&& echo 'linker = "aarch64-linux-gnu-gcc"' >> ~/.cargo/config
ENV CARGO_HOME "/root/.cargo"
ENV USER "root"
# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin app
WORKDIR /app
# Copies over *only* your manifests and vendored dependencies
COPY ./Cargo.* ./
COPY ./libs ./libs
COPY ./rust-toolchain ./rust-toolchain
# Prepare openssl arm64 libs
RUN sed 's/^deb/deb-src/' /etc/apt/sources.list > \
/etc/apt/sources.list.d/deb-src.list \
&& dpkg --add-architecture arm64 \
&& apt-get update \
&& apt-get install -y \
libssl-dev:arm64 \
libc6-dev:arm64
ENV CC_aarch64_unknown_linux_gnu="/usr/bin/aarch64-linux-gnu-gcc"
ENV CROSS_COMPILE="1"
ENV OPENSSL_INCLUDE_DIR="/usr/include/aarch64-linux-gnu"
ENV OPENSSL_LIB_DIR="/usr/lib/aarch64-linux-gnu"
# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN rustup target add aarch64-unknown-linux-gnu
RUN cargo build --release --target=aarch64-unknown-linux-gnu -v
RUN find . -not -path "./target*" -delete
# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .
# Builds again, this time it'll just be
# your actual source files being built
RUN cargo build --release --target=aarch64-unknown-linux-gnu -v
######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM balenalib/aarch64-debian:stretch
ENV ROCKET_ENV "staging"
ENV ROCKET_WORKERS=10
RUN [ "cross-build-start" ]
# Install needed libraries
RUN apt-get update && apt-get install -y\
openssl\
ca-certificates\
--no-install-recommends\
&& rm -rf /var/lib/apt/lists/*
RUN mkdir /data
RUN [ "cross-build-end" ]
VOLUME /data
EXPOSE 80
# Copies the files from the context (env file and web-vault)
# and the binary from the "build" stage to the current stage
COPY .env .
COPY Rocket.toml .
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/aarch64-unknown-linux-gnu/release/bitwarden_rs .
# Configures the startup!
CMD ./bitwarden_rs

Dockerfile.alpine (new file, 81 lines)

@@ -0,0 +1,81 @@
# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
FROM node:8-alpine as vault
ENV VAULT_VERSION "v2.4.0"
ENV URL "https://github.com/bitwarden/web.git"
RUN apk add --update-cache --upgrade \
curl \
git \
tar
RUN git clone -b $VAULT_VERSION --depth 1 $URL web-build
WORKDIR /web-build
COPY /docker/set-vault-baseurl.patch /web-build/
RUN git apply set-vault-baseurl.patch
RUN npm run sub:init && npm install
RUN npm run dist \
&& mv build /web-vault
########################## BUILD IMAGE ##########################
# Musl build image for statically compiled binary
FROM clux/muslrust:nightly-2018-08-24 as build
# Creates a dummy project used to grab dependencies
RUN USER=root cargo init --bin
# Copies over *only* your manifests and vendored dependencies
COPY ./Cargo.* ./
COPY ./libs ./libs
COPY ./rust-toolchain ./rust-toolchain
# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN cargo build --release
RUN find . -not -path "./target*" -delete
# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .
# Builds again, this time it'll just be
# your actual source files being built
RUN cargo build --release
######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM alpine:3.8
ENV ROCKET_ENV "staging"
ENV ROCKET_WORKERS=10
ENV SSL_CERT_DIR=/etc/ssl/certs
# Install needed libraries
RUN apk add \
openssl\
ca-certificates \
&& rm /var/cache/apk/*
RUN mkdir /data
VOLUME /data
EXPOSE 80
EXPOSE 3012
# Copies the files from the context (env file and web-vault)
# and the binary from the "build" stage to the current stage
COPY .env .
COPY Rocket.toml .
COPY --from=vault /web-vault ./web-vault
COPY --from=build /volume/target/x86_64-unknown-linux-musl/release/bitwarden_rs .
# Configures the startup!
CMD ./bitwarden_rs

Dockerfile.armv7 (new file, 112 lines)

@@ -0,0 +1,112 @@
# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
FROM node:8-alpine as vault
ENV VAULT_VERSION "v2.4.0"
ENV URL "https://github.com/bitwarden/web.git"
RUN apk add --update-cache --upgrade \
curl \
git \
tar
RUN git clone -b $VAULT_VERSION --depth 1 $URL web-build
WORKDIR /web-build
COPY /docker/set-vault-baseurl.patch /web-build/
RUN git apply set-vault-baseurl.patch
RUN npm run sub:init && npm install
RUN npm run dist \
&& mv build /web-vault
########################## BUILD IMAGE ##########################
# We need to use the Rust build image, because
# we need the Rust compiler and Cargo tooling
FROM rust as build
RUN apt-get update \
&& apt-get install -y \
gcc-arm-linux-gnueabihf \
&& mkdir -p ~/.cargo \
&& echo '[target.armv7-unknown-linux-gnueabihf]' >> ~/.cargo/config \
&& echo 'linker = "arm-linux-gnueabihf-gcc"' >> ~/.cargo/config
ENV CARGO_HOME "/root/.cargo"
ENV USER "root"
# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin app
WORKDIR /app
# Copies over *only* your manifests and vendored dependencies
COPY ./Cargo.* ./
COPY ./libs ./libs
COPY ./rust-toolchain ./rust-toolchain
# Prepare openssl armhf libs
RUN sed 's/^deb/deb-src/' /etc/apt/sources.list > \
/etc/apt/sources.list.d/deb-src.list \
&& dpkg --add-architecture armhf \
&& apt-get update \
&& apt-get install -y \
libssl-dev:armhf \
libc6-dev:armhf
ENV CC_armv7_unknown_linux_gnueabihf="/usr/bin/arm-linux-gnueabihf-gcc"
ENV CROSS_COMPILE="1"
ENV OPENSSL_INCLUDE_DIR="/usr/include/arm-linux-gnueabihf"
ENV OPENSSL_LIB_DIR="/usr/lib/arm-linux-gnueabihf"
# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN rustup target add armv7-unknown-linux-gnueabihf
RUN cargo build --release --target=armv7-unknown-linux-gnueabihf -v
RUN find . -not -path "./target*" -delete
# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .
# Builds again, this time it'll just be
# your actual source files being built
RUN cargo build --release --target=armv7-unknown-linux-gnueabihf -v
######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM balenalib/armv7hf-debian:stretch
ENV ROCKET_ENV "staging"
ENV ROCKET_WORKERS=10
RUN [ "cross-build-start" ]
# Install needed libraries
RUN apt-get update && apt-get install -y\
openssl\
ca-certificates\
--no-install-recommends\
&& rm -rf /var/lib/apt/lists/*
RUN mkdir /data
RUN [ "cross-build-end" ]
VOLUME /data
EXPOSE 80
# Copies the files from the context (env file and web-vault)
# and the binary from the "build" stage to the current stage
COPY .env .
COPY Rocket.toml .
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/armv7-unknown-linux-gnueabihf/release/bitwarden_rs .
# Configures the startup!
CMD ./bitwarden_rs
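The three Dockerfiles above live alongside the main `Dockerfile`, so they have to be selected explicitly with docker build's `-f` flag. A minimal usage sketch (the image tag names are assumptions, not from the repository):

```sh
# Alpine/musl build for x86_64
docker build -t bitwarden_rs:alpine -f Dockerfile.alpine .

# Cross-compiled ARM builds
docker build -t bitwarden_rs:aarch64 -f Dockerfile.aarch64 .
docker build -t bitwarden_rs:armv7 -f Dockerfile.armv7 .
```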

PROXY.md (new file, 94 lines)

@@ -0,0 +1,94 @@
# Proxy examples
In this document, `<SERVER>` refers to the IP or domain where bitwarden_rs is accessible from. If both the proxy and bitwarden_rs are running in the same system, simply use `localhost`.
The ports proxied by default are `80` for the web server and `3012` for the WebSocket server. The proxies are configured to listen in port `443` with HTTPS enabled, which is recommended.
When using a proxy, it's preferrable to configure HTTPS at the proxy level and not at the application level, this way the WebSockets connection is also secured.
## Caddy
```nginx
localhost:443 {
# The negotiation endpoint is also proxied to Rocket
proxy /notifications/hub/negotiate <SERVER>:80 {
transparent
}
# Notifications redirected to the websockets server
proxy /notifications/hub <SERVER>:3012 {
websocket
}
# Proxy the Root directory to Rocket
proxy / <SERVER>:80 {
transparent
}
tls ${SSLCERTIFICATE} ${SSLKEY}
}
```
## Nginx (by shauder)
```nginx
server {
listen 443 ssl http2;
server_name vault.*;
# Specify SSL config if using a shared one.
#include conf.d/ssl/ssl.conf;
location / {
proxy_pass http://<SERVER>:80;
}
location /notifications/hub {
proxy_pass http://<SERVER>:3012;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
}
location /notifications/hub/negotiate {
proxy_pass http://<SERVER>:80;
}
}
```
## Apache (by fbartels)
```apache
<VirtualHost *:443>
SSLEngine on
ServerName bitwarden.$hostname.$domainname
SSLCertificateFile ${SSLCERTIFICATE}
SSLCertificateKeyFile ${SSLKEY}
SSLCACertificateFile ${SSLCA}
${SSLCHAIN}
ErrorLog \${APACHE_LOG_DIR}/bitwarden-error.log
CustomLog \${APACHE_LOG_DIR}/bitwarden-access.log combined
RewriteEngine On
RewriteCond %{HTTP:Upgrade} =websocket [NC]
RewriteRule /(.*) ws://<SERVER>:3012/$1 [P,L]
ProxyPass / http://<SERVER>:80/
ProxyPreserveHost On
ProxyRequests Off
</VirtualHost>
```
## Traefik (docker-compose example)
```traefik
labels:
- 'traefik.frontend.rule=Host:vault.example.local'
- 'traefik.docker.network=traefik'
- 'traefik.port=80'
- 'traefik.enable=true'
- 'traefik.web.frontend.rule=Host:vault.example.local'
- 'traefik.web.port=80'
- 'traefik.hub.frontend.rule=Path:/notifications/hub'
- 'traefik.hub.port=3012'
- 'traefik.negotiate.frontend.rule=Path:/notifications/hub/negotiate'
- 'traefik.negotiate.port=80'
```
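The labels above are only the fragment that attaches to a service. A minimal docker-compose sketch of where they might sit (the service name, compose version, and external `traefik` network are assumptions, not from the repository):

```yaml
version: '2'

services:
  bitwarden:
    image: mprasil/bitwarden:latest
    volumes:
      - /bw-data/:/data/
    environment:
      # needed so /notifications/hub on port 3012 actually responds
      - WEBSOCKET_ENABLED=true
    labels:
      - 'traefik.frontend.rule=Host:vault.example.local'
      - 'traefik.docker.network=traefik'
      - 'traefik.port=80'
      - 'traefik.enable=true'
      - 'traefik.web.frontend.rule=Host:vault.example.local'
      - 'traefik.web.port=80'
      - 'traefik.hub.frontend.rule=Path:/notifications/hub'
      - 'traefik.hub.port=3012'
      - 'traefik.negotiate.frontend.rule=Path:/notifications/hub/negotiate'
      - 'traefik.negotiate.port=80'
    networks:
      - traefik

networks:
  traefik:
    external: true
```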

README.md (150 changed lines)

@@ -1,9 +1,19 @@
This is Bitwarden server API implementation written in rust compatible with [upstream Bitwarden clients](https://bitwarden.com/#download)*, ideal for self-hosted deployment where running official resource-heavy service might not be ideal. ### This is a Bitwarden server API implementation written in Rust compatible with [upstream Bitwarden clients](https://bitwarden.com/#download)*, perfect for self-hosted deployment where running the official resource-heavy service might not be ideal.
---
[![Travis Build Status](https://travis-ci.org/dani-garcia/bitwarden_rs.svg?branch=master)](https://travis-ci.org/dani-garcia/bitwarden_rs)
[![Dependency Status](https://deps.rs/repo/github/dani-garcia/bitwarden_rs/status.svg)](https://deps.rs/repo/github/dani-garcia/bitwarden_rs)
[![GitHub Release](https://img.shields.io/github/release/dani-garcia/bitwarden_rs.svg)](https://github.com/dani-garcia/bitwarden_rs/releases/latest)
[![GPL-3.0 Licensed](https://img.shields.io/github/license/dani-garcia/bitwarden_rs.svg)](https://github.com/dani-garcia/bitwarden_rs/blob/master/LICENSE.txt)
[![Matrix Chat](https://matrix.to/img/matrix-badge.svg)](https://matrix.to/#/#bitwarden_rs:matrix.org)
Image is based on [Rust implementation of Bitwarden API](https://github.com/dani-garcia/bitwarden_rs). Image is based on [Rust implementation of Bitwarden API](https://github.com/dani-garcia/bitwarden_rs).
_*Note, that this project is not associated with the [Bitwarden](https://bitwarden.com/) project nor 8bit Solutions LLC._ _*Note, that this project is not associated with the [Bitwarden](https://bitwarden.com/) project nor 8bit Solutions LLC._
---
**Table of contents** **Table of contents**
- [Features](#features) - [Features](#features)
@@ -13,7 +23,10 @@ _*Note, that this project is not associated with the [Bitwarden](https://bitward
- [Updating the bitwarden image](#updating-the-bitwarden-image) - [Updating the bitwarden image](#updating-the-bitwarden-image)
- [Configuring bitwarden service](#configuring-bitwarden-service) - [Configuring bitwarden service](#configuring-bitwarden-service)
- [Disable registration of new users](#disable-registration-of-new-users) - [Disable registration of new users](#disable-registration-of-new-users)
- [Disable invitations](#disable-invitations)
- [Configure server administrator](#configure-server-administrator)
- [Enabling HTTPS](#enabling-https) - [Enabling HTTPS](#enabling-https)
- [Enabling WebSocket notifications](#enabling-websocket-notifications)
- [Enabling U2F authentication](#enabling-u2f-authentication) - [Enabling U2F authentication](#enabling-u2f-authentication)
- [Changing persistent data location](#changing-persistent-data-location) - [Changing persistent data location](#changing-persistent-data-location)
- [/data prefix:](#data-prefix) - [/data prefix:](#data-prefix)
@@ -22,12 +35,15 @@ _*Note, that this project is not associated with the [Bitwarden](https://bitward
- [icons cache](#icons-cache) - [icons cache](#icons-cache)
- [Changing the API request size limit](#changing-the-api-request-size-limit) - [Changing the API request size limit](#changing-the-api-request-size-limit)
- [Changing the number of workers](#changing-the-number-of-workers) - [Changing the number of workers](#changing-the-number-of-workers)
- [SMTP configuration](#smtp-configuration)
- [Password hint display](#password-hint-display)
- [Disabling or overriding the Vault interface hosting](#disabling-or-overriding-the-vault-interface-hosting) - [Disabling or overriding the Vault interface hosting](#disabling-or-overriding-the-vault-interface-hosting)
- [Other configuration](#other-configuration) - [Other configuration](#other-configuration)
- [Building your own image](#building-your-own-image) - [Building your own image](#building-your-own-image)
- [Building binary](#building-binary) - [Building binary](#building-binary)
- [Available packages](#available-packages) - [Available packages](#available-packages)
- [Arch Linux](#arch-linux) - [Arch Linux](#arch-linux)
- [Kubernetes deployment](#kubernetes-deployment)
- [Backing up your vault](#backing-up-your-vault) - [Backing up your vault](#backing-up-your-vault)
- [1. the sqlite3 database](#1-the-sqlite3-database) - [1. the sqlite3 database](#1-the-sqlite3-database)
- [2. the attachments folder](#2-the-attachments-folder) - [2. the attachments folder](#2-the-attachments-folder)
@@ -38,6 +54,7 @@ _*Note, that this project is not associated with the [Bitwarden](https://bitward
- [Changing user email](#changing-user-email) - [Changing user email](#changing-user-email)
- [Creating organization](#creating-organization) - [Creating organization](#creating-organization)
- [Inviting users into organization](#inviting-users-into-organization) - [Inviting users into organization](#inviting-users-into-organization)
- [Running on unencrypted connection](#running-on-unencrypted-connection)
- [Get in touch](#get-in-touch) - [Get in touch](#get-in-touch)
## Features ## Features
@@ -125,6 +142,37 @@ docker run -d --name bitwarden \
-p 80:80 \ -p 80:80 \
mprasil/bitwarden:latest mprasil/bitwarden:latest
``` ```
Note: While users can't register on their own, they can still be invited by already registered users. Read below if you also want to disable that.
### Disable invitations
Even when registration is disabled, organization administrators or owners can invite users to join organization. This won't send email invitation to the users, but after they are invited, they can register with the invited email even if `SIGNUPS_ALLOWED` is actually set to `false`. You can disable this functionality completely by setting `INVITATIONS_ALLOWED` env variable to `false`:
```sh
docker run -d --name bitwarden \
-e SIGNUPS_ALLOWED=false \
-e INVITATIONS_ALLOWED=false \
-v /bw-data/:/data/ \
-p 80:80 \
mprasil/bitwarden:latest
```
### Configure server administrator
**Warning:** *Never* use your regular account for the admin functionality. This is a bit of a hack using the Vault interface for something it's not intended to do and it breaks any other functionality for the account. Please set up and use separate account just for this functionality.
You can configure one email account to be server administrator via the `SERVER_ADMIN_EMAIL` environment variable:
```sh
docker run -d --name bitwarden \
-e SERVER_ADMIN_EMAIL=admin@example.com \
-v /bw-data/:/data/ \
-p 80:80 \
mprasil/bitwarden:latest
```
This will give the user extra functionality and privileges to manage users on the server. In the Vault, the user will see a special (virtual) organization called `bitwarden_rs`. This organization doesn't actually exist and can't be used for most things. (can't have collections or ciphers) Instead it just contains all the users registered on the server. Deleting users from this organization will actually completely delete the user from the server. Inviting users into this organization will just invite the user so they are able to register, but will not grant any organization membership. (unlike inviting user to regular organization)
You can think of the `bitwarden_rs` organization as sort of Admin interface to manage users on the server. Keep in mind that deleting user this way removes the user permanently without any way to restore the deleted data just as if user deleted their own account.
### Enabling HTTPS ### Enabling HTTPS
To enable HTTPS, you need to configure the `ROCKET_TLS`. To enable HTTPS, you need to configure the `ROCKET_TLS`.
@@ -147,6 +195,47 @@ docker run -d --name bitwarden \
``` ```
Note that you need to mount ssl files and you need to forward appropriate port. Note that you need to mount ssl files and you need to forward appropriate port.
Due to what is likely a certificate validation bug in Android, you need to make sure that your certificate includes the full chain of trust. In the case of certbot, this means using `fullchain.pem` instead of `cert.pem`.
Softwares used for getting certs are often using symlinks. If that is the case, both locations need to be accessible to the docker container.
Example: [certbot](https://certbot.eff.org/) will create a folder that contains the needed `fullchain.pem` and `privkey.pem` files in `/etc/letsencrypt/live/mydomain/`
These files are symlinked to `../../archive/mydomain/privkey.pem`
So to use from bitwarden container:
```sh
docker run -d --name bitwarden \
-e ROCKET_TLS='{certs="/ssl/live/mydomain/fullchain.pem",key="/ssl/live/mydomain/privkey.pem"}' \
-v /etc/letsencrypt/:/ssl/ \
-v /bw-data/:/data/ \
-p 443:80 \
mprasil/bitwarden:latest
```
### Enabling WebSocket notifications
*Important: This does not apply to the mobile clients, which use push notifications.*
To enable WebSockets notifications, an external reverse proxy is necessary, and it must be configured to do the following:
- Route the `/notifications/hub` endpoint to the WebSocket server, by default at port `3012`, making sure to pass the `Connection` and `Upgrade` headers. (Note the port can be changed with `WEBSOCKET_PORT` variable)
- Route everything else, including `/notifications/hub/negotiate`, to the standard Rocket server, by default at port `80`.
- If using Docker, you may need to map both ports with the `-p` flag
Example configurations are included in the [PROXY.md](https://github.com/dani-garcia/bitwarden_rs/blob/master/PROXY.md) file.
Then you need to enable WebSockets negotiation on the bitwarden_rs side by setting the `WEBSOCKET_ENABLED` variable to `true`:
```sh
docker run -d --name bitwarden \
-e WEBSOCKET_ENABLED=true \
-v /bw-data/:/data/ \
-p 80:80 \
-p 3012:3012 \
mprasil/bitwarden:latest
```
Note: The reason for this workaround is the lack of support for WebSockets from Rocket (though [it's a planned feature](https://github.com/SergioBenitez/Rocket/issues/90)), which forces us to launch a secondary server on a separate port.
### Enabling U2F authentication ### Enabling U2F authentication
To enable U2F authentication, you must be serving bitwarden_rs from an HTTPS domain with a valid certificate (Either using the included To enable U2F authentication, you must be serving bitwarden_rs from an HTTPS domain with a valid certificate (Either using the included
HTTPS options or with a reverse proxy). We recommend using a free certificate from Let's Encrypt. HTTPS options or with a reverse proxy). We recommend using a free certificate from Let's Encrypt.
@@ -242,7 +331,7 @@ docker run -d --name bitwarden \
When you run bitwarden_rs, it spawns `2 * <number of cpu cores>` workers to handle requests. On some systems this might lead to low number of workers and hence slow performance, so the default in the docker image is changed to spawn 10 threads. You can override this setting to increase or decrease the number of workers by setting the `ROCKET_WORKERS` variable. When you run bitwarden_rs, it spawns `2 * <number of cpu cores>` workers to handle requests. On some systems this might lead to low number of workers and hence slow performance, so the default in the docker image is changed to spawn 10 threads. You can override this setting to increase or decrease the number of workers by setting the `ROCKET_WORKERS` variable.
In the example bellow, we're starting with 20 workers: In the example below, we're starting with 20 workers:
```sh ```sh
docker run -d --name bitwarden \ docker run -d --name bitwarden \
@@ -252,6 +341,37 @@ docker run -d --name bitwarden \
mprasil/bitwarden:latest mprasil/bitwarden:latest
``` ```
### SMTP configuration
You can configure bitwarden_rs to send emails via a SMTP agent:
```sh
docker run -d --name bitwarden \
-e SMTP_HOST=<smtp.domain.tld> \
-e SMTP_FROM=<bitwarden@domain.tld> \
-e SMTP_PORT=587 \
-e SMTP_SSL=true \
-e SMTP_USERNAME=<username> \
-e SMTP_PASSWORD=<password> \
-v /bw-data/:/data/ \
-p 80:80 \
mprasil/bitwarden:latest
```
When `SMTP_SSL` is set to `true`(this is the default), only TLSv1.1 and TLSv1.2 protocols will be accepted and `SMTP_PORT` will default to `587`. If set to `false`, `SMTP_PORT` will default to `25` and the connection won't be encrypted. This can be very insecure, use this setting only if you know what you're doing.
### Password hint display
Usually, password hints are sent by email. But as bitwarden_rs is made with small or personal deployment in mind, hints are also available from the password hint page, so you don't have to configure an email service. If you want to disable this feature, you can use the `SHOW_PASSWORD_HINT` variable:
```sh
docker run -d --name bitwarden \
-e SHOW_PASSWORD_HINT=false \
-v /bw-data/:/data/ \
-p 80:80 \
mprasil/bitwarden:latest
```
### Disabling or overriding the Vault interface hosting ### Disabling or overriding the Vault interface hosting
As a convenience bitwarden_rs image will also host static files for Vault web interface. You can disable this static file hosting completely by setting the WEB_VAULT_ENABLED variable. As a convenience bitwarden_rs image will also host static files for Vault web interface. You can disable this static file hosting completely by setting the WEB_VAULT_ENABLED variable.
@@ -263,7 +383,6 @@ docker run -d --name bitwarden \
-p 80:80 \ -p 80:80 \
mprasil/bitwarden:latest mprasil/bitwarden:latest
``` ```
Alternatively you can override the Vault files and provide your own static files to host. You can do that by mounting a path with your files over the `/web-vault` directory in the container. Just make sure the directory contains at least `index.html` file. Alternatively you can override the Vault files and provide your own static files to host. You can do that by mounting a path with your files over the `/web-vault` directory in the container. Just make sure the directory contains at least `index.html` file.
```sh ```sh
@@ -291,7 +410,7 @@ docker build -t bitwarden_rs .
## Building binary ## Building binary
For building binary outside the Docker environment and running it locally without docker, please see [build instructions](BUILD.md). For building binary outside the Docker environment and running it locally without docker, please see [build instructions](https://github.com/dani-garcia/bitwarden_rs/blob/master/BUILD.md).
## Available packages ## Available packages
@@ -299,6 +418,11 @@ For building binary outside the Docker environment and running it locally withou
Bitwarden_rs is already packaged for Archlinux thanks to @mqus. There is an [AUR package](https://aur.archlinux.org/packages/bitwarden_rs) (optionally with the [vault web interface](https://aur.archlinux.org/packages/bitwarden_rs-vault/) ) available. Bitwarden_rs is already packaged for Archlinux thanks to @mqus. There is an [AUR package](https://aur.archlinux.org/packages/bitwarden_rs) (optionally with the [vault web interface](https://aur.archlinux.org/packages/bitwarden_rs-vault/) ) available.
## Kubernetes deployment
Please check the [kubernetes-bitwarden_rs](https://github.com/icicimov/kubernetes-bitwarden_rs) repository for example deployment in Kubernetes.
It will setup a fully functional and secure `bitwarden_rs` application in Kubernetes behind [nginx-ingress-controller](https://github.com/kubernetes/ingress-nginx) and AWS [ELBv1](https://aws.amazon.com/elasticloadbalancing/features/#Details_for_Elastic_Load_Balancing_Products). It provides little bit more than just simple deployment but you can use all or just part of the manifests depending on your needs and setup.
## Backing up your vault ## Backing up your vault
### 1. the sqlite3 database ### 1. the sqlite3 database
@@ -306,10 +430,10 @@ Bitwarden_rs is already packaged for Archlinux thanks to @mqus. There is an [AUR
The sqlite3 database should be backed up using the proper sqlite3 backup command. This will ensure the database does not become corrupted if the backup happens during a database write. The sqlite3 database should be backed up using the proper sqlite3 backup command. This will ensure the database does not become corrupted if the backup happens during a database write.
``` ```
sqlite3 /$DATA_FOLDER/db.sqlite3 ".backup '/$DATA_FOLDER/db-backup/backup.sq3'" sqlite3 /$DATA_FOLDER/db.sqlite3 ".backup '/$DATA_FOLDER/db-backup/backup.sqlite3'"
``` ```
This command can be run via a CRON job everyday, however note that it will overwrite the same backup.sq3 file each time. This backup file should therefore be saved via incremental backup either using a CRON job command that appends a timestamp or from another backup app such as Duplicati. This command can be run via a CRON job everyday, however note that it will overwrite the same `backup.sqlite3` file each time. This backup file should therefore be saved via incremental backup either using a CRON job command that appends a timestamp or from another backup app such as Duplicati. To restore simply overwrite `db.sqlite3` with `backup.sqlite3` (while bitwarden_rs is stopped).
### 2. the attachments folder ### 2. the attachments folder
@@ -328,8 +452,8 @@ This is optional, the icon cache can re-download itself however if you have a la
The root user inside the container is already pretty limited in what it can do, so the default setup should be secure enough. However if you wish to go the extra mile to avoid using root even in container, here's how you can do that: The root user inside the container is already pretty limited in what it can do, so the default setup should be secure enough. However if you wish to go the extra mile to avoid using root even in container, here's how you can do that:
1. Create a data folder that's owned by non-root user, so you can use that user to write persistent data. Get the user `id`. In linux you can run `stat <folder_name>` to get/verify the owner ID. 1. Create a data folder that's owned by non-root user, so you can use that user to write persistent data. Get the user `id`. In linux you can run `stat <folder_name>` to get/verify the owner ID.
2. When you run the container, you need to provide the user ID as one of the parameters. Note that this needs to be in the numeric form and not the user name, because docker would try to find such user defined inside the image, which would likely not be there or it would have different ID than your local user and hence wouldn't be able to write the persistent data. This can be done with the `--user` parameter. 2. When you run the container, you need to provide the user ID as one of the parameters. Note that this needs to be in the numeric form and not the username, because docker would try to find such user-defined inside the image, which would likely not be there or it would have different ID than your local user and hence wouldn't be able to write the persistent data. This can be done with the `--user` parameter.
3. bitwarden_rs listens on port `80` inside the container by default, this [won't work with non-root user](https://www.w3.org/Daemon/User/Installation/PrivilegedPorts.html), because regular users aren't allowed to open port bellow `1024`. To overcome this, you need to configure server to listen on a different port, you can use `ROCKET_PORT` to do that. 3. bitwarden_rs listens on port `80` inside the container by default, this [won't work with non-root user](https://www.w3.org/Daemon/User/Installation/PrivilegedPorts.html), because regular users aren't allowed to open port below `1024`. To overcome this, you need to configure server to listen on a different port, you can use `ROCKET_PORT` to do that.
Here's sample docker run, that uses user with id `1000` and with the port redirection configured, so that inside container the service is listening on port `8080` and docker translates that to external (host) port `80`: Here's sample docker run, that uses user with id `1000` and with the port redirection configured, so that inside container the service is listening on port `8080` and docker translates that to external (host) port `80`:
@@ -354,7 +478,15 @@ We use upstream Vault interface directly without any (significant) changes, this
### Inviting users into organization ### Inviting users into organization
The invited users won't get the invitation email; instead, users that are already registered will appear in the interface as if they had already accepted the invitation. The Organization admin then just needs to confirm them to make them proper Organization members and give them access to the shared secrets.
Invited users that aren't registered yet will show up in the Organization admin interface as "Invited". At the same time, an invitation record is created that allows them to register even if [user registration is disabled](#disable-registration-of-new-users) (unless you [disable this functionality](#disable-invitations)). They will automatically become "Accepted" once they register. From there the Organization admin can confirm them to give them access to the Organization.
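If you'd rather not have invites create these registration records, this can be switched off via an environment variable; a hedged sketch, assuming the variable is named `INVITATIONS_ALLOWED` to match the `CONFIG.invitations_allowed` flag visible in the diffs further down:

```sh
# Disable invitation records for unregistered users (assumed variable name).
docker run -d --name bitwarden \
  -e INVITATIONS_ALLOWED=false \
  -v /bw-data/:/data/ \
  -p 80:80 \
  mprasil/bitwarden:latest
```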
### Running on unencrypted connection
It is strongly recommended to run the bitwarden_rs service over HTTPS. However, the server itself, while [supporting it](#enabling-https), does not strictly require such a setup. This makes it a bit easier to spin up the service in cases where you can generally trust the connection (internal and secure network, access over VPN, ...) or when you want to put the service behind an HTTP proxy that will do the encryption on the proxy end.
Running over HTTP is still reasonably secure provided you use a really strong master password and avoid using the web Vault over a connection that is vulnerable to MITM attacks, where an attacker could inject JavaScript into your interface. However, some forms of 2FA might not work in this setup, and the [Vault doesn't work in this configuration in Chrome](https://github.com/bitwarden/web/issues/254).
## Get in touch

View File

@@ -1,7 +1,7 @@
 --- a/src/app/services/services.module.ts
 +++ b/src/app/services/services.module.ts
-@@ -116,17 +116,15 @@ const exportService = new ExportService(folderService, cipherService, apiService
+@@ -120,20 +120,16 @@ const notificationsService = new NotificationsService(userService, syncService,
- const importService = new ImportService(cipherService, folderService, apiService, i18nService, collectionService);
+ const environmentService = new EnvironmentService(apiService, storageService, notificationsService);
  const auditService = new AuditService(cryptoFunctionService, apiService);
 -const analytics = new Analytics(window, () => platformUtilsService.isDev() || platformUtilsService.isSelfHost(),
@@ -15,9 +15,13 @@
 - const isDev = platformUtilsService.isDev();
 - if (!isDev && platformUtilsService.isSelfHost()) {
 - environmentService.baseUrl = window.location.origin;
-- } else {
-- environmentService.notificationsUrl = isDev ? 'http://localhost:61840' :
-- 'https://notifications.bitwarden.com'; // window.location.origin + '/notifications';
 - }
 + const isDev = false;
 + environmentService.baseUrl = window.location.origin;
- await apiService.setUrls({
++ environmentService.notificationsUrl = window.location.origin + '/notifications';
+ apiService.setUrls({
  base: isDev ? null : window.location.origin,
  api: isDev ? 'http://localhost:4000' : null,

View File

@@ -0,0 +1,3 @@
ALTER TABLE ciphers
ADD COLUMN
password_history TEXT;

View File

@@ -0,0 +1 @@
DROP TABLE invitations;

View File

@@ -0,0 +1,3 @@
CREATE TABLE invitations (
email TEXT NOT NULL PRIMARY KEY
);

View File

@@ -0,0 +1,7 @@
ALTER TABLE users
ADD COLUMN
client_kdf_type INTEGER NOT NULL DEFAULT 0; -- PBKDF2
ALTER TABLE users
ADD COLUMN
client_kdf_iter INTEGER NOT NULL DEFAULT 5000;
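After the migration above runs, the new KDF columns can be verified on the schema; a hedged check, assuming the default SQLite database lives at `data/db.sqlite3`:

```sh
# Should list client_kdf_type and client_kdf_iter among the users columns.
sqlite3 data/db.sqlite3 "PRAGMA table_info(users);" | grep client_kdf
```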

View File

@@ -1 +1 @@
-nightly-2018-07-18
+nightly-2018-10-03
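The `rust-toolchain` bump above means source builds should use the matching nightly; a minimal sketch, assuming `rustup` is installed and the checkout directory is named `bitwarden_rs`:

```sh
# Pin the nightly this tree expects, then build.
cd bitwarden_rs
rustup override set nightly-2018-10-03
cargo build --release
```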

View File

@@ -5,6 +5,7 @@ use db::models::*;
 use api::{PasswordData, JsonResult, EmptyResult, JsonUpcase, NumberOrString};
 use auth::Headers;
+use mail;
 use CONFIG;
@@ -12,6 +13,8 @@ use CONFIG;
 #[allow(non_snake_case)]
 struct RegisterData {
     Email: String,
+    Kdf: Option<i32>,
+    KdfIterations: Option<i32>,
     Key: String,
     Keys: Option<KeysData>,
     MasterPasswordHash: String,
@@ -30,15 +33,42 @@ struct KeysData {
 fn register(data: JsonUpcase<RegisterData>, conn: DbConn) -> EmptyResult {
     let data: RegisterData = data.into_inner().data;
-    if !CONFIG.signups_allowed {
-        err!("Signups not allowed")
+    let mut user = match User::find_by_mail(&data.Email, &conn) {
+        Some(mut user) => {
+            if Invitation::take(&data.Email, &conn) {
+                for mut user_org in UserOrganization::find_invited_by_user(&user.uuid, &conn).iter_mut() {
+                    user_org.status = UserOrgStatus::Accepted as i32;
+                    if user_org.save(&conn).is_err() {
+                        err!("Failed to accept user to organization")
+                    }
+                };
+                user
+            } else if CONFIG.signups_allowed {
+                err!("Account with this email already exists")
+            } else {
+                err!("Registration not allowed")
+            }
+        },
+        None => {
+            if CONFIG.signups_allowed || Invitation::take(&data.Email, &conn) {
+                User::new(data.Email)
+            } else {
+                err!("Registration not allowed")
+            }
+        }
+    };
+    if let Some(client_kdf_iter) = data.KdfIterations {
+        user.client_kdf_iter = client_kdf_iter;
     }
-    if User::find_by_mail(&data.Email, &conn).is_some() {
-        err!("Email already exists")
+    if let Some(client_kdf_type) = data.Kdf {
+        user.client_kdf_type = client_kdf_type;
     }
-    let mut user = User::new(data.Email, data.Key, data.MasterPasswordHash);
+    user.set_password(&data.MasterPasswordHash);
+    user.key = data.Key;
     // Add extra fields if present
     if let Some(name) = data.Name {
@@ -54,9 +84,10 @@ fn register(data: JsonUpcase<RegisterData>, conn: DbConn) -> EmptyResult {
         user.public_key = Some(keys.PublicKey);
     }
-    user.save(&conn);
-    Ok(())
+    match user.save(&conn) {
+        Ok(()) => Ok(()),
+        Err(_) => err!("Failed to save user")
+    }
 }
 #[get("/accounts/profile")]
@@ -85,10 +116,14 @@ fn post_profile(data: JsonUpcase<ProfileData>, headers: Headers, conn: DbConn) -
     let mut user = headers.user;
     user.name = data.Name;
-    user.password_hint = data.MasterPasswordHint;
-    user.save(&conn);
-    Ok(Json(user.to_json(&conn)))
+    user.password_hint = match data.MasterPasswordHint {
+        Some(ref h) if h.is_empty() => None,
+        _ => data.MasterPasswordHint,
+    };
+    match user.save(&conn) {
+        Ok(()) => Ok(Json(user.to_json(&conn))),
+        Err(_) => err!("Failed to save user profile")
+    }
 }
 #[get("/users/<uuid>/public-key")]
@@ -114,11 +149,14 @@ fn post_keys(data: JsonUpcase<KeysData>, headers: Headers, conn: DbConn) -> Json
     user.private_key = Some(data.EncryptedPrivateKey);
     user.public_key = Some(data.PublicKey);
-    user.save(&conn);
-    Ok(Json(user.to_json(&conn)))
+    match user.save(&conn) {
+        Ok(()) => Ok(Json(user.to_json(&conn))),
+        Err(_) => err!("Failed to save the user's keys")
+    }
 }
 #[derive(Deserialize)]
 #[allow(non_snake_case)]
 struct ChangePassData {
@@ -138,9 +176,40 @@ fn post_password(data: JsonUpcase<ChangePassData>, headers: Headers, conn: DbCon
     user.set_password(&data.NewMasterPasswordHash);
     user.key = data.Key;
-    user.save(&conn);
-    Ok(())
+    match user.save(&conn) {
+        Ok(()) => Ok(()),
+        Err(_) => err!("Failed to save password")
+    }
+}
+#[derive(Deserialize)]
+#[allow(non_snake_case)]
+struct ChangeKdfData {
+    Kdf: i32,
+    KdfIterations: i32,
+    MasterPasswordHash: String,
+    NewMasterPasswordHash: String,
+    Key: String,
+}
+#[post("/accounts/kdf", data = "<data>")]
+fn post_kdf(data: JsonUpcase<ChangeKdfData>, headers: Headers, conn: DbConn) -> EmptyResult {
+    let data: ChangeKdfData = data.into_inner().data;
+    let mut user = headers.user;
+    if !user.check_valid_password(&data.MasterPasswordHash) {
+        err!("Invalid password")
+    }
+    user.client_kdf_iter = data.KdfIterations;
+    user.client_kdf_type = data.Kdf;
+    user.set_password(&data.NewMasterPasswordHash);
+    user.key = data.Key;
+    match user.save(&conn) {
+        Ok(()) => Ok(()),
+        Err(_) => err!("Failed to save password settings")
+    }
 }
 #[post("/accounts/security-stamp", data = "<data>")]
@@ -153,9 +222,10 @@ fn post_sstamp(data: JsonUpcase<PasswordData>, headers: Headers, conn: DbConn) -
     }
     user.reset_security_stamp();
-    user.save(&conn);
-    Ok(())
+    match user.save(&conn) {
+        Ok(()) => Ok(()),
+        Err(_) => err!("Failed to reset security stamp")
+    }
 }
 #[derive(Deserialize)]
@@ -210,12 +280,18 @@ fn post_email(data: JsonUpcase<ChangeEmailData>, headers: Headers, conn: DbConn)
     user.set_password(&data.NewMasterPasswordHash);
     user.key = data.Key;
-    user.save(&conn);
-    Ok(())
+    match user.save(&conn) {
+        Ok(()) => Ok(()),
+        Err(_) => err!("Failed to save email address")
+    }
 }
 #[post("/accounts/delete", data = "<data>")]
+fn post_delete_account(data: JsonUpcase<PasswordData>, headers: Headers, conn: DbConn) -> EmptyResult {
+    delete_account(data, headers, conn)
+}
+#[delete("/accounts", data = "<data>")]
 fn delete_account(data: JsonUpcase<PasswordData>, headers: Headers, conn: DbConn) -> EmptyResult {
     let data: PasswordData = data.into_inner().data;
     let user = headers.user;
@@ -224,27 +300,10 @@ fn delete_account(data: JsonUpcase<PasswordData>, headers: Headers, conn: DbConn
         err!("Invalid password")
     }
-    // Delete ciphers and their attachments
-    for cipher in Cipher::find_owned_by_user(&user.uuid, &conn) {
-        if cipher.delete(&conn).is_err() {
-            err!("Failed deleting cipher")
-        }
-    }
-    // Delete folders
-    for f in Folder::find_by_user(&user.uuid, &conn) {
-        if f.delete(&conn).is_err() {
-            err!("Failed deleting folder")
-        }
-    }
-    // Delete devices
-    for d in Device::find_by_user(&user.uuid, &conn) { d.delete(&conn); }
-    // Delete user
-    user.delete(&conn);
-    Ok(())
+    match user.delete(&conn) {
+        Ok(()) => Ok(()),
+        Err(_) => err!("Failed deleting user account, are you the only owner of some organization?")
+    }
 }
 #[get("/accounts/revision-date")]
@@ -263,15 +322,43 @@ struct PasswordHintData {
 fn password_hint(data: JsonUpcase<PasswordHintData>, conn: DbConn) -> EmptyResult {
     let data: PasswordHintData = data.into_inner().data;
-    if !CONFIG.show_password_hint {
-        return Ok(())
+    let hint = match User::find_by_mail(&data.Email, &conn) {
+        Some(user) => user.password_hint,
+        None => return Ok(()),
+    };
+    if let Some(ref mail_config) = CONFIG.mail {
+        if let Err(e) = mail::send_password_hint(&data.Email, hint, mail_config) {
+            err!(format!("There have been a problem sending the email: {}", e));
+        }
+    } else if CONFIG.show_password_hint {
+        if let Some(hint) = hint {
+            err!(format!("Your password hint is: {}", &hint));
+        } else {
+            err!("Sorry, you have no password hint...");
+        }
     }
-    match User::find_by_mail(&data.Email, &conn) {
-        Some(user) => {
-            let hint = user.password_hint.to_owned().unwrap_or_default();
-            err!(format!("Your password hint is: {}", hint))
-        },
-        None => Ok(()),
-    }
+    Ok(())
+}
+#[derive(Deserialize)]
+#[allow(non_snake_case)]
+struct PreloginData {
+    Email: String,
+}
+#[post("/accounts/prelogin", data = "<data>")]
+fn prelogin(data: JsonUpcase<PreloginData>, conn: DbConn) -> JsonResult {
+    let data: PreloginData = data.into_inner().data;
+    let (kdf_type, kdf_iter) = match User::find_by_mail(&data.Email, &conn) {
+        Some(user) => (user.client_kdf_type, user.client_kdf_iter),
+        None => (User::CLIENT_KDF_TYPE_DEFAULT, User::CLIENT_KDF_ITER_DEFAULT),
+    };
+    Ok(Json(json!({
+        "Kdf": kdf_type,
+        "KdfIterations": kdf_iter
+    })))
 }
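The new `prelogin` route lets clients discover a user's KDF settings before authenticating. A hedged usage sketch, assuming the account routes are mounted under `/api` and a host name of `vault.example.com`:

```sh
# Ask the server which KDF parameters to use for this account.
curl -s -X POST https://vault.example.com/api/accounts/prelogin \
  -H "Content-Type: application/json" \
  -d '{"Email": "user@example.com"}'
# Expected shape of the reply: {"Kdf":0,"KdfIterations":5000}
```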

View File

@@ -1,6 +1,7 @@
 use std::path::Path;
 use std::collections::HashSet;
+use rocket::State;
 use rocket::Data;
 use rocket::http::ContentType;
@@ -16,13 +17,19 @@ use db::models::*;
 use crypto;
-use api::{self, PasswordData, JsonResult, EmptyResult, JsonUpcase};
+use api::{self, PasswordData, JsonResult, EmptyResult, JsonUpcase, WebSocketUsers, UpdateType};
 use auth::Headers;
 use CONFIG;
-#[get("/sync")]
-fn sync(headers: Headers, conn: DbConn) -> JsonResult {
+#[derive(FromForm)]
+#[allow(non_snake_case)]
+struct SyncData {
+    excludeDomains: bool,
+}
+#[get("/sync?<data>")]
+fn sync(data: SyncData, headers: Headers, conn: DbConn) -> JsonResult {
     let user_json = headers.user.to_json(&conn);
     let folders = Folder::find_by_user(&headers.user.uuid, &conn);
@@ -34,7 +41,7 @@ fn sync(headers: Headers, conn: DbConn) -> JsonResult {
     let ciphers = Cipher::find_by_user(&headers.user.uuid, &conn);
     let ciphers_json: Vec<Value> = ciphers.iter().map(|c| c.to_json(&headers.host, &headers.user.uuid, &conn)).collect();
-    let domains_json = api::core::get_eq_domains(headers).unwrap().into_inner();
+    let domains_json = if data.excludeDomains { Value::Null } else { api::core::get_eq_domains(headers).unwrap().into_inner() };
     Ok(Json(json!({
         "Profile": user_json,
@@ -46,6 +53,13 @@ fn sync(headers: Headers, conn: DbConn) -> JsonResult {
     })))
 }
+#[get("/sync")]
+fn sync_no_query(headers: Headers, conn: DbConn) -> JsonResult {
+    let sync_data = SyncData {
+        excludeDomains: false,
+    };
+    sync(sync_data, headers, conn)
+}
 #[get("/ciphers")]
 fn get_ciphers(headers: Headers, conn: DbConn) -> JsonResult {
@@ -56,6 +70,7 @@ fn get_ciphers(headers: Headers, conn: DbConn) -> JsonResult {
     Ok(Json(json!({
         "Data": ciphers_json,
         "Object": "list",
+        "ContinuationToken": null
     })))
 }
@@ -86,7 +101,7 @@ fn get_cipher_details(uuid: String, headers: Headers, conn: DbConn) -> JsonResul
 #[derive(Deserialize, Debug)]
 #[allow(non_snake_case)]
-struct CipherData {
+pub struct CipherData {
     // Id is optional as it is included only in bulk share
     Id: Option<String>,
     // Folder id is not included in import
@@ -100,8 +115,8 @@ struct CipherData {
     Card = 3,
     Identity = 4
     */
-    Type: i32, // TODO: Change this to NumberOrString
-    Name: String,
+    pub Type: i32, // TODO: Change this to NumberOrString
+    pub Name: String,
     Notes: Option<String>,
     Fields: Option<Value>,
@@ -112,30 +127,33 @@ struct CipherData {
     Identity: Option<Value>,
     Favorite: Option<bool>,
+    PasswordHistory: Option<Value>,
 }
 #[post("/ciphers/admin", data = "<data>")]
-fn post_ciphers_admin(data: JsonUpcase<CipherData>, headers: Headers, conn: DbConn) -> JsonResult {
+fn post_ciphers_admin(data: JsonUpcase<CipherData>, headers: Headers, conn: DbConn, ws: State<WebSocketUsers>) -> JsonResult {
     // TODO: Implement this correctly
-    post_ciphers(data, headers, conn)
+    post_ciphers(data, headers, conn, ws)
 }
 #[post("/ciphers", data = "<data>")]
-fn post_ciphers(data: JsonUpcase<CipherData>, headers: Headers, conn: DbConn) -> JsonResult {
+fn post_ciphers(data: JsonUpcase<CipherData>, headers: Headers, conn: DbConn, ws: State<WebSocketUsers>) -> JsonResult {
     let data: CipherData = data.into_inner().data;
     let mut cipher = Cipher::new(data.Type, data.Name.clone());
-    update_cipher_from_data(&mut cipher, data, &headers, true, &conn)?;
+    update_cipher_from_data(&mut cipher, data, &headers, false, &conn, &ws, UpdateType::SyncCipherCreate)?;
     Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, &conn)))
 }
-fn update_cipher_from_data(cipher: &mut Cipher, data: CipherData, headers: &Headers, is_new_or_shared: bool, conn: &DbConn) -> EmptyResult {
-    if is_new_or_shared {
+pub fn update_cipher_from_data(cipher: &mut Cipher, data: CipherData, headers: &Headers, shared_to_collection: bool, conn: &DbConn, ws: &State<WebSocketUsers>, ut: UpdateType) -> EmptyResult {
     if let Some(org_id) = data.OrganizationId {
         match UserOrganization::find_by_user_and_org(&headers.user.uuid, &org_id, &conn) {
             None => err!("You don't have permission to add item to organization"),
-            Some(org_user) => if org_user.has_full_access() {
+            Some(org_user) => if shared_to_collection
+                || org_user.has_full_access()
+                || cipher.is_write_accessible_to_user(&headers.user.uuid, &conn) {
                 cipher.organization_uuid = Some(org_id);
                 cipher.user_uuid = None;
             } else {
@@ -145,7 +163,6 @@ fn update_cipher_from_data(cipher: &mut Cipher, data: CipherData, headers: &Head
     } else {
         cipher.user_uuid = Some(headers.user.uuid.clone());
     }
-    }
     if let Some(ref folder_id) = data.FolderId {
         match Folder::find_by_uuid(folder_id, conn) {
@@ -177,6 +194,7 @@ fn update_cipher_from_data(cipher: &mut Cipher, data: CipherData, headers: &Head
     type_data["Name"] = Value::String(data.Name.clone());
     type_data["Notes"] = data.Notes.clone().map(Value::String).unwrap_or(Value::Null);
     type_data["Fields"] = data.Fields.clone().unwrap_or(Value::Null);
+    type_data["PasswordHistory"] = data.PasswordHistory.clone().unwrap_or(Value::Null);
     // TODO: ******* Backwards compat end **********
     cipher.favorite = data.Favorite.unwrap_or(false);
@@ -184,8 +202,13 @@ fn update_cipher_from_data(cipher: &mut Cipher, data: CipherData, headers: &Head
     cipher.notes = data.Notes;
     cipher.fields = data.Fields.map(|f| f.to_string());
     cipher.data = type_data.to_string();
+    cipher.password_history = data.PasswordHistory.map(|f| f.to_string());
-    cipher.save(&conn);
+    match cipher.save(&conn) {
+        Ok(()) => (),
+        Err(_) => println!("Error: Failed to save cipher")
+    };
+    ws.send_cipher_update(ut, &cipher, &cipher.update_users_revision(&conn));
     if cipher.move_to_folder(data.FolderId, &headers.user.uuid, &conn).is_err() {
         err!("Error saving the folder information")
@@ -215,15 +238,19 @@ struct RelationsData {
 #[post("/ciphers/import", data = "<data>")]
-fn post_ciphers_import(data: JsonUpcase<ImportData>, headers: Headers, conn: DbConn) -> EmptyResult {
+fn post_ciphers_import(data: JsonUpcase<ImportData>, headers: Headers, conn: DbConn, ws: State<WebSocketUsers>) -> EmptyResult {
     let data: ImportData = data.into_inner().data;
     // Read and create the folders
-    let folders: Vec<_> = data.Folders.into_iter().map(|folder| {
-        let mut folder = Folder::new(headers.user.uuid.clone(), folder.Name);
-        folder.save(&conn);
-        folder
-    }).collect();
+    let mut folders: Vec<_> = Vec::new();
+    for folder in data.Folders.into_iter() {
+        let mut new_folder = Folder::new(headers.user.uuid.clone(), folder.Name);
+        if new_folder.save(&conn).is_err() {
+            err!("Failed importing folders")
+        } else {
+            folders.push(new_folder);
+        }
+    }
     // Read the relations between folders and ciphers
     use std::collections::HashMap;
@@ -239,7 +266,7 @@ fn post_ciphers_import(data: JsonUpcase<ImportData>, headers: Headers, conn: DbC
         .map(|i| folders[*i].uuid.clone());
     let mut cipher = Cipher::new(cipher_data.Type, cipher_data.Name.clone());
-    update_cipher_from_data(&mut cipher, cipher_data, &headers, true, &conn)?;
+    update_cipher_from_data(&mut cipher, cipher_data, &headers, false, &conn, &ws, UpdateType::SyncCipherCreate)?;
     cipher.move_to_folder(folder_uuid, &headers.user.uuid.clone(), &conn).ok();
 }
@@ -253,22 +280,22 @@ fn post_ciphers_import(data: JsonUpcase<ImportData>, headers: Headers, conn: DbC
 #[put("/ciphers/<uuid>/admin", data = "<data>")]
-fn put_cipher_admin(uuid: String, data: JsonUpcase<CipherData>, headers: Headers, conn: DbConn) -> JsonResult {
-    put_cipher(uuid, data, headers, conn)
+fn put_cipher_admin(uuid: String, data: JsonUpcase<CipherData>, headers: Headers, conn: DbConn, ws: State<WebSocketUsers>) -> JsonResult {
+    put_cipher(uuid, data, headers, conn, ws)
 }
 #[post("/ciphers/<uuid>/admin", data = "<data>")]
-fn post_cipher_admin(uuid: String, data: JsonUpcase<CipherData>, headers: Headers, conn: DbConn) -> JsonResult {
-    post_cipher(uuid, data, headers, conn)
+fn post_cipher_admin(uuid: String, data: JsonUpcase<CipherData>, headers: Headers, conn: DbConn, ws: State<WebSocketUsers>) -> JsonResult {
+    post_cipher(uuid, data, headers, conn, ws)
 }
 #[post("/ciphers/<uuid>", data = "<data>")]
-fn post_cipher(uuid: String, data: JsonUpcase<CipherData>, headers: Headers, conn: DbConn) -> JsonResult {
-    put_cipher(uuid, data, headers, conn)
+fn post_cipher(uuid: String, data: JsonUpcase<CipherData>, headers: Headers, conn: DbConn, ws: State<WebSocketUsers>) -> JsonResult {
+    put_cipher(uuid, data, headers, conn, ws)
 }
 #[put("/ciphers/<uuid>", data = "<data>")]
-fn put_cipher(uuid: String, data: JsonUpcase<CipherData>, headers: Headers, conn: DbConn) -> JsonResult {
+fn put_cipher(uuid: String, data: JsonUpcase<CipherData>, headers: Headers, conn: DbConn, ws: State<WebSocketUsers>) -> JsonResult {
     let data: CipherData = data.into_inner().data;
     let mut cipher = match Cipher::find_by_uuid(&uuid, &conn) {
@@ -280,7 +307,7 @@ fn put_cipher(uuid: String, data: JsonUpcase<CipherData>, headers: Headers, conn
         err!("Cipher is not write accessible")
     }
-    update_cipher_from_data(&mut cipher, data, &headers, false, &conn)?;
+    update_cipher_from_data(&mut cipher, data, &headers, false, &conn, &ws, UpdateType::SyncCipherUpdate)?;
     Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, &conn)))
 }
@@ -323,9 +350,15 @@ fn post_collections_admin(uuid: String, data: JsonUpcase<CollectionsAdminData>,
         Some(collection) => {
             if collection.is_writable_by_user(&headers.user.uuid, &conn) {
                 if posted_collections.contains(&collection.uuid) { // Add to collection
-                    CollectionCipher::save(&cipher.uuid, &collection.uuid, &conn);
+                    match CollectionCipher::save(&cipher.uuid, &collection.uuid, &conn) {
+                        Ok(()) => (),
+                        Err(_) => err!("Failed to add cipher to collection")
+                    };
                 } else { // Remove from collection
-                    CollectionCipher::delete(&cipher.uuid, &collection.uuid, &conn);
+                    match CollectionCipher::delete(&cipher.uuid, &collection.uuid, &conn) {
+                        Ok(()) => (),
+                        Err(_) => err!("Failed to remove cipher from collection")
+                    };
                 }
             } else {
                 err!("No rights to modify the collection")
@@ -345,17 +378,17 @@ struct ShareCipherData {
 }
 #[post("/ciphers/<uuid>/share", data = "<data>")]
-fn post_cipher_share(uuid: String, data: JsonUpcase<ShareCipherData>, headers: Headers, conn: DbConn) -> JsonResult {
+fn post_cipher_share(uuid: String, data: JsonUpcase<ShareCipherData>, headers: Headers, conn: DbConn, ws: State<WebSocketUsers>) -> JsonResult {
     let data: ShareCipherData = data.into_inner().data;
-    share_cipher_by_uuid(&uuid, data, &headers, &conn)
+    share_cipher_by_uuid(&uuid, data, &headers, &conn, &ws)
 }
 #[put("/ciphers/<uuid>/share", data = "<data>")]
-fn put_cipher_share(uuid: String, data: JsonUpcase<ShareCipherData>, headers: Headers, conn: DbConn) -> JsonResult {
+fn put_cipher_share(uuid: String, data: JsonUpcase<ShareCipherData>, headers: Headers, conn: DbConn, ws: State<WebSocketUsers>) -> JsonResult {
     let data: ShareCipherData = data.into_inner().data;
-    share_cipher_by_uuid(&uuid, data, &headers, &conn)
+    share_cipher_by_uuid(&uuid, data, &headers, &conn, &ws)
 }
 #[derive(Deserialize)]
@@ -366,15 +399,15 @@ struct ShareSelectedCipherData {
 }
 #[put("/ciphers/share", data = "<data>")]
-fn put_cipher_share_seleted(data: JsonUpcase<ShareSelectedCipherData>, headers: Headers, conn: DbConn) -> EmptyResult {
+fn put_cipher_share_seleted(data: JsonUpcase<ShareSelectedCipherData>, headers: Headers, conn: DbConn, ws: State<WebSocketUsers>) -> EmptyResult {
     let mut data: ShareSelectedCipherData = data.into_inner().data;
     let mut cipher_ids: Vec<String> = Vec::new();
-    if data.Ciphers.len() == 0 {
+    if data.Ciphers.is_empty() {
         err!("You must select at least one cipher.")
     }
-    if data.CollectionIds.len() == 0 {
+    if data.CollectionIds.is_empty() {
         err!("You must select at least one collection.")
     }
@@ -387,7 +420,7 @@ fn put_cipher_share_seleted(data: JsonUpcase<ShareSelectedCipherData>, headers:
     let attachments = Attachment::find_by_ciphers(cipher_ids, &conn);
-    if attachments.len() > 0 {
+    if !attachments.is_empty() {
         err!("Ciphers should not have any attachments.")
     }
@@ -398,15 +431,16 @@ fn put_cipher_share_seleted(data: JsonUpcase<ShareSelectedCipherData>, headers:
     };
     match shared_cipher_data.Cipher.Id.take() {
-        Some(id) => share_cipher_by_uuid(&id, shared_cipher_data , &headers, &conn)?,
+        Some(id) => share_cipher_by_uuid(&id, shared_cipher_data , &headers, &conn, &ws)?,
         None => err!("Request missing ids field")
     };
     }
     Ok(())
 }
-fn share_cipher_by_uuid(uuid: &str, data: ShareCipherData, headers: &Headers, conn: &DbConn) -> JsonResult {
+fn share_cipher_by_uuid(uuid: &str, data: ShareCipherData, headers: &Headers, conn: &DbConn, ws: &State<WebSocketUsers>) -> JsonResult {
     let mut cipher = match Cipher::find_by_uuid(&uuid, &conn) {
         Some(cipher) => {
             if cipher.is_write_accessible_to_user(&headers.user.uuid, &conn) {
@@ -418,22 +452,27 @@ fn share_cipher_by_uuid(uuid: &str, data: ShareCipherData, headers: &Headers, co
         None => err!("Cipher doesn't exist")
     };
-    match data.Cipher.OrganizationId {
+    match data.Cipher.OrganizationId.clone() {
         None => err!("Organization id not provided"),
-        Some(_) => {
-            update_cipher_from_data(&mut cipher, data.Cipher, &headers, true, &conn)?;
+        Some(organization_uuid) => {
+            let mut shared_to_collection = false;
             for uuid in &data.CollectionIds {
-                match Collection::find_by_uuid(uuid, &conn) {
+                match Collection::find_by_uuid_and_org(uuid, &organization_uuid, &conn) {
                     None => err!("Invalid collection ID provided"),
                     Some(collection) => {
                         if collection.is_writable_by_user(&headers.user.uuid, &conn) {
-                            CollectionCipher::save(&cipher.uuid.clone(), &collection.uuid, &conn);
+                            match CollectionCipher::save(&cipher.uuid.clone(), &collection.uuid, &conn) {
+                                Ok(()) => (),
+                                Err(_) => err!("Failed to add cipher to collection")
+                            };
+                            shared_to_collection = true;
                         } else {
                             err!("No rights to modify the collection")
                         }
                     }
                 }
            }
+            update_cipher_from_data(&mut cipher, data.Cipher, &headers, shared_to_collection, &conn, &ws, UpdateType::SyncCipherUpdate)?;
             Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, &conn)))
 }
@@ -484,7 +523,10 @@ fn post_attachment(uuid: String, data: Data, content_type: &ContentType, headers
     };
     let attachment = Attachment::new(file_name, cipher.uuid.clone(), name, size);
-    attachment.save(&conn);
+    match attachment.save(&conn) {
+        Ok(()) => (),
+        Err(_) => println!("Error: failed to save attachment")
+    };
     }).expect("Error processing multipart data");
     Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, &conn)))
@@ -496,55 +538,65 @@ fn post_attachment_admin(uuid: String, data: Data, content_type: &ContentType, h
 }
 #[post("/ciphers/<uuid>/attachment/<attachment_id>/share", format = "multipart/form-data", data = "<data>")]
-fn post_attachment_share(uuid: String, attachment_id: String, data: Data, content_type: &ContentType, headers: Headers, conn: DbConn) -> JsonResult {
-    _delete_cipher_attachment_by_id(&uuid, &attachment_id, &headers, &conn)?;
+fn post_attachment_share(uuid: String, attachment_id: String, data: Data, content_type: &ContentType, headers: Headers, conn: DbConn, ws: State<WebSocketUsers>) -> JsonResult {
+    _delete_cipher_attachment_by_id(&uuid, &attachment_id, &headers, &conn, &ws)?;
     post_attachment(uuid, data, content_type, headers, conn)
 }
 #[post("/ciphers/<uuid>/attachment/<attachment_id>/delete-admin")]
-fn delete_attachment_post_admin(uuid: String, attachment_id: String, headers: Headers, conn: DbConn) -> EmptyResult {
-    delete_attachment(uuid, attachment_id, headers, conn)
+fn delete_attachment_post_admin(uuid: String, attachment_id: String, headers: Headers, conn: DbConn, ws: State<WebSocketUsers>) -> EmptyResult {
+    delete_attachment(uuid, attachment_id, headers, conn, ws)
 }
 #[post("/ciphers/<uuid>/attachment/<attachment_id>/delete")]
-fn delete_attachment_post(uuid: String, attachment_id: String, headers: Headers, conn: DbConn) -> EmptyResult {
-    delete_attachment(uuid, attachment_id, headers, conn)
+fn delete_attachment_post(uuid: String, attachment_id: String, headers: Headers, conn: DbConn, ws: State<WebSocketUsers>) -> EmptyResult {
+    delete_attachment(uuid, attachment_id, headers, conn, ws)
 }
 #[delete("/ciphers/<uuid>/attachment/<attachment_id>")]
-fn delete_attachment(uuid: String, attachment_id: String, headers: Headers, conn: DbConn) -> EmptyResult {
-    _delete_cipher_attachment_by_id(&uuid, &attachment_id, &headers, &conn)
+fn delete_attachment(uuid: String, attachment_id: String, headers: Headers, conn: DbConn, ws: State<WebSocketUsers>) -> EmptyResult {
+    _delete_cipher_attachment_by_id(&uuid, &attachment_id, &headers, &conn, &ws)
+}
+#[delete("/ciphers/<uuid>/attachment/<attachment_id>/admin")]
+fn delete_attachment_admin(uuid: String, attachment_id: String, headers: Headers, conn: DbConn, ws: State<WebSocketUsers>) -> EmptyResult {
+    _delete_cipher_attachment_by_id(&uuid, &attachment_id, &headers, &conn, &ws)
 }
 #[post("/ciphers/<uuid>/delete")]
-fn delete_cipher_post(uuid: String, headers: Headers, conn: DbConn) -> EmptyResult {
-    _delete_cipher_by_uuid(&uuid, &headers, &conn)
+fn delete_cipher_post(uuid: String, headers: Headers, conn: DbConn, ws: State<WebSocketUsers>) -> EmptyResult {
+    _delete_cipher_by_uuid(&uuid, &headers, &conn, &ws)
 }
 #[post("/ciphers/<uuid>/delete-admin")]
-fn delete_cipher_post_admin(uuid: String, headers: Headers, conn: DbConn) -> EmptyResult {
-    _delete_cipher_by_uuid(&uuid, &headers, &conn)
+fn delete_cipher_post_admin(uuid: String, headers: Headers, conn: DbConn, ws: State<WebSocketUsers>) -> EmptyResult {
+    _delete_cipher_by_uuid(&uuid, &headers, &conn, &ws)
 }
 #[delete("/ciphers/<uuid>")]
-fn delete_cipher(uuid: String, headers: Headers, conn: DbConn) -> EmptyResult {
-    _delete_cipher_by_uuid(&uuid, &headers, &conn)
+fn delete_cipher(uuid: String, headers: Headers, conn: DbConn, ws: State<WebSocketUsers>) -> EmptyResult {
+    _delete_cipher_by_uuid(&uuid, &headers, &conn, &ws)
+}
+#[delete("/ciphers/<uuid>/admin")]
+fn delete_cipher_admin(uuid: String, headers: Headers, conn: DbConn, ws: State<WebSocketUsers>) -> EmptyResult {
+    _delete_cipher_by_uuid(&uuid, &headers, &conn, &ws)
 }
 #[delete("/ciphers", data = "<data>")]
-fn delete_cipher_selected(data: JsonUpcase<Value>, headers: Headers, conn: DbConn) -> EmptyResult {
+fn delete_cipher_selected(data: JsonUpcase<Value>, headers: Headers, conn: DbConn, ws: State<WebSocketUsers>) -> EmptyResult {
     let data: Value = data.into_inner().data;
     let uuids = match data.get("Ids") {
         Some(ids) => match ids.as_array() {
-            Some(ids) => ids.iter().filter_map(|uuid| { uuid.as_str() }),
+            Some(ids) => ids.iter().filter_map(Value::as_str),
            None => err!("Posted ids field is not an array")
         },
         None => err!("Request missing ids field")
     };
     for uuid in uuids {
-        if let error @ Err(_) = _delete_cipher_by_uuid(uuid, &headers, &conn) {
+        if let error @ Err(_) = _delete_cipher_by_uuid(uuid, &headers, &conn, &ws) {
             return error;
         };
     }
@@ -553,12 +605,12 @@ fn delete_cipher_selected(data: JsonUpcase<Value>, headers: Headers, conn: DbCon
 }
 #[post("/ciphers/delete", data = "<data>")]
-fn delete_cipher_selected_post(data: JsonUpcase<Value>, headers: Headers, conn: DbConn) -> EmptyResult {
-    delete_cipher_selected(data, headers, conn)
+fn delete_cipher_selected_post(data: JsonUpcase<Value>, headers: Headers, conn: DbConn, ws: State<WebSocketUsers>) -> EmptyResult {
+    delete_cipher_selected(data, headers, conn, ws)
 }
 #[post("/ciphers/move", data = "<data>")]
-fn move_cipher_selected(data: JsonUpcase<Value>, headers: Headers, conn: DbConn) -> EmptyResult {
+fn move_cipher_selected(data: JsonUpcase<Value>, headers: Headers, conn: DbConn, ws: State<WebSocketUsers>) -> EmptyResult {
     let data = data.into_inner().data;
     let folder_id = match data.get("FolderId") {
@@ -583,7 +635,7 @@ fn move_cipher_selected(data: JsonUpcase<Value>, headers: Headers, conn: DbConn)
     let uuids = match data.get("Ids") {
         Some(ids) => match ids.as_array() {
-            Some(ids) => ids.iter().filter_map(|uuid| { uuid.as_str() }),
+            Some(ids) => ids.iter().filter_map(Value::as_str),
             None => err!("Posted ids field is not an array")
         },
         None => err!("Request missing ids field")
@@ -603,19 +655,23 @@ fn move_cipher_selected(data: JsonUpcase<Value>, headers: Headers, conn: DbConn)
         if cipher.move_to_folder(folder_id.clone(), &headers.user.uuid, &conn).is_err() {
             err!("Error saving the folder information")
         }
-        cipher.save(&conn);
+        match cipher.save(&conn) {
+            Ok(()) => (),
+            Err(_) => println!("Error: Failed to save cipher")
+        };
+        ws.send_cipher_update(UpdateType::SyncCipherUpdate, &cipher, &cipher.update_users_revision(&conn));
     }
     Ok(())
 }
 #[put("/ciphers/move", data = "<data>")]
-fn move_cipher_selected_put(data: JsonUpcase<Value>, headers: Headers, conn: DbConn) -> EmptyResult {
-    move_cipher_selected(data, headers, conn)
+fn move_cipher_selected_put(data: JsonUpcase<Value>, headers: Headers, conn: DbConn, ws: State<WebSocketUsers>) -> EmptyResult {
+    move_cipher_selected(data, headers, conn, ws)
 }
 #[post("/ciphers/purge", data = "<data>")]
-fn delete_all(data: JsonUpcase<PasswordData>, headers: Headers, conn: DbConn) -> EmptyResult {
+fn delete_all(data: JsonUpcase<PasswordData>, headers: Headers, conn: DbConn, ws: State<WebSocketUsers>) -> EmptyResult {
     let data: PasswordData = data.into_inner().data;
     let password_hash = data.MasterPasswordHash;
@@ -630,6 +686,9 @@ fn delete_all(data: JsonUpcase<PasswordData>, headers: Headers, conn: DbConn) ->
         if cipher.delete(&conn).is_err() {
             err!("Failed deleting cipher")
         }
+        else {
+            ws.send_cipher_update(UpdateType::SyncCipherDelete, &cipher, &cipher.update_users_revision(&conn));
+        }
     }
     // Delete folders
@@ -637,13 +696,16 @@ fn delete_all(data: JsonUpcase<PasswordData>, headers: Headers, conn: DbConn) ->
         if f.delete(&conn).is_err() {
             err!("Failed deleting folder")
         }
+        else {
+            ws.send_folder_update(UpdateType::SyncFolderCreate, &f);
+        }
     }
     Ok(())
 }
-fn _delete_cipher_by_uuid(uuid: &str, headers: &Headers, conn: &DbConn) -> EmptyResult {
-    let cipher = match Cipher::find_by_uuid(uuid, conn) {
+fn _delete_cipher_by_uuid(uuid: &str, headers: &Headers, conn: &DbConn, ws: &State<WebSocketUsers>) -> EmptyResult {
+    let cipher = match Cipher::find_by_uuid(&uuid, &conn) {
         Some(cipher) => cipher,
         None => err!("Cipher doesn't exist"),
     };
@@ -652,13 +714,16 @@ fn _delete_cipher_by_uuid(uuid: &str, headers: &Headers, conn: &DbConn) -> Empty
         err!("Cipher can't be deleted by user")
     }
-    match cipher.delete(conn) {
-        Ok(()) => Ok(()),
+    match cipher.delete(&conn) {
+        Ok(()) => {
+            ws.send_cipher_update(UpdateType::SyncCipherDelete, &cipher, &cipher.update_users_revision(&conn));
+            Ok(())
+        }
         Err(_) => err!("Failed deleting cipher")
     }
 }
-fn _delete_cipher_attachment_by_id(uuid: &str, attachment_id: &str, headers: &Headers, conn: &DbConn) -> EmptyResult {
+fn _delete_cipher_attachment_by_id(uuid: &str, attachment_id: &str, headers: &Headers, conn: &DbConn, ws: &State<WebSocketUsers>) -> EmptyResult {
     let attachment = match Attachment::find_by_id(&attachment_id, &conn) {
         Some(attachment) => attachment,
         None => err!("Attachment doesn't exist")
@@ -679,7 +744,10 @@ fn _delete_cipher_attachment_by_id(uuid: &str, attachment_id: &str, headers: &He
     // Delete attachment
     match attachment.delete(&conn) {
-        Ok(()) => Ok(()),
+        Ok(()) => {
+            ws.send_cipher_update(UpdateType::SyncCipherDelete, &cipher, &cipher.update_users_revision(&conn));
+            Ok(())
+        }
         Err(_) => err!("Deleting attachement failed")
     }
 }
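With the `sync`/`sync_no_query` pair above, clients can opt out of the equivalent-domains payload. A hedged usage sketch, assuming a bearer token already obtained from the identity endpoint and a host of `vault.example.com`:

```sh
# Full sync, Domains included (no query string, handled by sync_no_query):
curl -s -H "Authorization: Bearer $ACCESS_TOKEN" \
  "https://vault.example.com/api/sync"

# Sync with the Domains object nulled out (handled by sync):
curl -s -H "Authorization: Bearer $ACCESS_TOKEN" \
  "https://vault.example.com/api/sync?excludeDomains=true"
```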

View File

@@ -1,9 +1,10 @@
+use rocket::State;
 use rocket_contrib::{Json, Value};
 use db::DbConn;
 use db::models::*;
-use api::{JsonResult, EmptyResult, JsonUpcase};
+use api::{JsonResult, EmptyResult, JsonUpcase, WebSocketUsers, UpdateType};
 use auth::Headers;
 #[get("/folders")]
@@ -15,6 +16,7 @@ fn get_folders(headers: Headers, conn: DbConn) -> JsonResult {
     Ok(Json(json!({
         "Data": folders_json,
         "Object": "list",
+        "ContinuationToken": null,
     })))
 }
@@ -40,23 +42,26 @@ pub struct FolderData {
 }
 #[post("/folders", data = "<data>")]
-fn post_folders(data: JsonUpcase<FolderData>, headers: Headers, conn: DbConn) -> JsonResult {
+fn post_folders(data: JsonUpcase<FolderData>, headers: Headers, conn: DbConn, ws: State<WebSocketUsers>) -> JsonResult {
     let data: FolderData = data.into_inner().data;
     let mut folder = Folder::new(headers.user.uuid.clone(), data.Name);
-    folder.save(&conn);
+    if folder.save(&conn).is_err() {
+        err!("Failed to save folder")
+    }
+    ws.send_folder_update(UpdateType::SyncFolderCreate, &folder);
     Ok(Json(folder.to_json()))
 }
 #[post("/folders/<uuid>", data = "<data>")]
-fn post_folder(uuid: String, data: JsonUpcase<FolderData>, headers: Headers, conn: DbConn) -> JsonResult {
-    put_folder(uuid, data, headers, conn)
+fn post_folder(uuid: String, data: JsonUpcase<FolderData>, headers: Headers, conn: DbConn, ws: State<WebSocketUsers>) -> JsonResult {
+    put_folder(uuid, data, headers, conn, ws)
 }
 #[put("/folders/<uuid>", data = "<data>")]
-fn put_folder(uuid: String, data: JsonUpcase<FolderData>, headers: Headers, conn: DbConn) -> JsonResult {
+fn put_folder(uuid: String, data: JsonUpcase<FolderData>, headers: Headers, conn: DbConn, ws: State<WebSocketUsers>) -> JsonResult {
     let data: FolderData = data.into_inner().data;
     let mut folder = match Folder::find_by_uuid(&uuid, &conn) {
@@ -70,18 +75,21 @@ fn put_folder(uuid: String, data: JsonUpcase<FolderData>, headers: Headers, conn
     folder.name = data.Name;
-    folder.save(&conn);
+    if folder.save(&conn).is_err() {
+        err!("Failed to save folder")
+    }
+    ws.send_folder_update(UpdateType::SyncFolderUpdate, &folder);
     Ok(Json(folder.to_json()))
 }
 #[post("/folders/<uuid>/delete")]
-fn delete_folder_post(uuid: String, headers: Headers, conn: DbConn) -> EmptyResult {
-    delete_folder(uuid, headers, conn)
+fn delete_folder_post(uuid: String, headers: Headers, conn: DbConn, ws: State<WebSocketUsers>) -> EmptyResult {
    delete_folder(uuid, headers, conn, ws)
 }
 #[delete("/folders/<uuid>")]
-fn delete_folder(uuid: String, headers: Headers, conn: DbConn) -> EmptyResult {
+fn delete_folder(uuid: String, headers: Headers, conn: DbConn, ws: State<WebSocketUsers>) -> EmptyResult {
     let folder = match Folder::find_by_uuid(&uuid, &conn) {
         Some(folder) => folder,
         _ => err!("Invalid folder")
@@ -93,7 +101,10 @@ fn delete_folder(uuid: String, headers: Headers, conn: DbConn) -> EmptyResult {
     // Delete the actual folder entry
     match folder.delete(&conn) {
-        Ok(()) => Ok(()),
+        Ok(()) => {
+            ws.send_folder_update(UpdateType::SyncFolderDelete, &folder);
+            Ok(())
+        }
         Err(_) => err!("Failed deleting folder")
     }
 }

View File

@@ -260,7 +260,8 @@
"Type": 26, "Type": 26,
"Domains": [ "Domains": [
"steampowered.com", "steampowered.com",
"steamcommunity.com" "steamcommunity.com",
"steamgames.com"
], ],
"Excluded": false "Excluded": false
}, },

View File

@@ -19,14 +19,18 @@ pub fn routes() -> Vec<Route> {
     get_public_keys,
     post_keys,
     post_password,
+    post_kdf,
     post_sstamp,
     post_email_token,
     post_email,
     delete_account,
+    post_delete_account,
     revision_date,
     password_hint,
+    prelogin,
     sync,
+    sync_no_query,
     get_ciphers,
     get_cipher,
@@ -42,6 +46,7 @@ pub fn routes() -> Vec<Route> {
     delete_attachment_post,
     delete_attachment_post_admin,
     delete_attachment,
+    delete_attachment_admin,
     post_cipher_admin,
     post_cipher_share,
     put_cipher_share,
@@ -51,6 +56,7 @@ pub fn routes() -> Vec<Route> {
     delete_cipher_post,
     delete_cipher_post_admin,
     delete_cipher,
+    delete_cipher_admin,
     delete_cipher_selected,
     delete_cipher_selected_post,
     delete_all,
@@ -74,6 +80,7 @@ pub fn routes() -> Vec<Route> {
     activate_authenticator,
     activate_authenticator_put,
     generate_u2f,
+    generate_u2f_challenge,
     activate_u2f,
     activate_u2f_put,
@@ -107,12 +114,15 @@ pub fn routes() -> Vec<Route> {
     put_organization_user,
     delete_user,
     post_delete_user,
+    post_reinvite_user,
+    post_org_import,
     clear_device_token,
     put_device_token,
     get_eq_domains,
     post_eq_domains,
+    put_eq_domains,
 ]
 }
@@ -144,9 +154,10 @@ fn clear_device_token(uuid: String, data: Json<Value>, headers: Headers, conn: D
         err!("Device not owned by user")
     }
-    device.delete(&conn);
-    Ok(())
+    match device.delete(&conn) {
+        Ok(()) => Ok(()),
+        Err(_) => err!("Failed deleting device")
+    }
 }
 #[put("/devices/identifier/<uuid>/token", data = "<data>")]
@@ -207,7 +218,7 @@ struct EquivDomainData {
 }
 #[post("/settings/domains", data = "<data>")]
-fn post_eq_domains(data: JsonUpcase<EquivDomainData>, headers: Headers, conn: DbConn) -> EmptyResult {
+fn post_eq_domains(data: JsonUpcase<EquivDomainData>, headers: Headers, conn: DbConn) -> JsonResult {
     let data: EquivDomainData = data.into_inner().data;
     let excluded_globals = data.ExcludedGlobalEquivalentDomains.unwrap_or_default();
@@ -219,7 +230,14 @@ fn post_eq_domains(data: JsonUpcase<EquivDomainData>, headers: Headers, conn: Db
     user.excluded_globals = to_string(&excluded_globals).unwrap_or("[]".to_string());
     user.equivalent_domains = to_string(&equivalent_domains).unwrap_or("[]".to_string());
-    user.save(&conn);
-    Ok(())
+    match user.save(&conn) {
+        Ok(()) => Ok(Json(json!({}))),
+        Err(_) => err!("Failed to save user")
+    }
+}
+#[put("/settings/domains", data = "<data>")]
+fn put_eq_domains(data: JsonUpcase<EquivDomainData>, headers: Headers, conn: DbConn) -> JsonResult {
+    post_eq_domains(data, headers, conn)
 }
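Since the handler now answers both verbs and returns JSON, saving custom equivalent domains can be exercised directly. A hedged sketch of the PUT variant; the `EquivalentDomains` field name is assumed from the struct's naming pattern (only `ExcludedGlobalEquivalentDomains` is visible above):

```sh
# Save equivalent domains; the server replies with an empty JSON object on success.
curl -s -X PUT https://vault.example.com/api/settings/domains \
  -H "Authorization: Bearer $ACCESS_TOKEN" \
  -H "Content-Type: application/json" \
  -d '{"ExcludedGlobalEquivalentDomains": [], "EquivalentDomains": [["example.com", "example.org"]]}'
```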

View File

@@ -1,11 +1,10 @@
#![allow(unused_imports)] use rocket::State;
use rocket_contrib::{Json, Value}; use rocket_contrib::{Json, Value};
use CONFIG;
use db::DbConn; use db::DbConn;
use db::models::*; use db::models::*;
use api::{PasswordData, JsonResult, EmptyResult, NumberOrString, JsonUpcase}; use api::{PasswordData, JsonResult, EmptyResult, NumberOrString, JsonUpcase, WebSocketUsers, UpdateType};
use auth::{Headers, AdminHeaders, OwnerHeaders}; use auth::{Headers, AdminHeaders, OwnerHeaders};
use serde::{Deserialize, Deserializer}; use serde::{Deserialize, Deserializer};
@@ -50,9 +49,16 @@ fn create_organization(headers: Headers, data: JsonUpcase<OrgData>, conn: DbConn
user_org.type_ = UserOrgType::Owner as i32; user_org.type_ = UserOrgType::Owner as i32;
user_org.status = UserOrgStatus::Confirmed as i32; user_org.status = UserOrgStatus::Confirmed as i32;
org.save(&conn); if org.save(&conn).is_err() {
user_org.save(&conn); err!("Failed creating organization")
collection.save(&conn); }
if user_org.save(&conn).is_err() {
err!("Failed to add user to organization")
}
if collection.save(&conn).is_err() {
err!("Failed creating Collection");
}
Ok(Json(org.to_json())) Ok(Json(org.to_json()))
} }
@@ -127,9 +133,11 @@ fn post_organization(org_id: String, _headers: OwnerHeaders, data: JsonUpcase<Or
org.name = data.Name; org.name = data.Name;
org.billing_email = data.BillingEmail; org.billing_email = data.BillingEmail;
org.save(&conn);
Ok(Json(org.to_json())) match org.save(&conn) {
Ok(()) => Ok(Json(org.to_json())),
Err(_) => err!("Failed to modify organization")
}
} }
// GET /api/collections?writeOnly=false // GET /api/collections?writeOnly=false
@@ -140,10 +148,10 @@ fn get_user_collections(headers: Headers, conn: DbConn) -> JsonResult {
"Data": "Data":
Collection::find_by_user_uuid(&headers.user.uuid, &conn) Collection::find_by_user_uuid(&headers.user.uuid, &conn)
.iter() .iter()
.map(|collection| { .map(Collection::to_json)
collection.to_json() .collect::<Value>(),
}).collect::<Value>(), "Object": "list",
"Object": "list" "ContinuationToken": null,
}))) })))
} }
@@ -153,10 +161,10 @@ fn get_org_collections(org_id: String, _headers: AdminHeaders, conn: DbConn) ->
"Data": "Data":
Collection::find_by_organization(&org_id, &conn) Collection::find_by_organization(&org_id, &conn)
.iter() .iter()
.map(|collection| { .map(Collection::to_json)
collection.to_json() .collect::<Value>(),
}).collect::<Value>(), "Object": "list",
"Object": "list" "ContinuationToken": null,
}))) })))
} }
@@ -171,7 +179,9 @@ fn post_organization_collections(org_id: String, _headers: AdminHeaders, data: J
let mut collection = Collection::new(org.uuid.clone(), data.Name); let mut collection = Collection::new(org.uuid.clone(), data.Name);
collection.save(&conn); if collection.save(&conn).is_err() {
err!("Failed saving Collection");
}
Ok(Json(collection.to_json())) Ok(Json(collection.to_json()))
} }
@@ -200,7 +210,9 @@ fn post_organization_collection_update(org_id: String, col_id: String, _headers:
} }
collection.name = data.Name.clone(); collection.name = data.Name.clone();
collection.save(&conn); if collection.save(&conn).is_err() {
err!("Failed updating Collection");
}
Ok(Json(collection.to_json())) Ok(Json(collection.to_json()))
} }
@@ -217,7 +229,7 @@ fn delete_organization_collection_user(org_id: String, col_id: String, org_user_
} }
}; };
match UserOrganization::find_by_uuid(&org_user_id, &conn) { match UserOrganization::find_by_uuid_and_org(&org_user_id, &org_id, &conn) {
None => err!("User not found in organization"), None => err!("User not found in organization"),
Some(user_org) => { Some(user_org) => {
match CollectionUser::find_by_collection_and_user(&collection.uuid, &user_org.user_uuid, &conn) { match CollectionUser::find_by_collection_and_user(&collection.uuid, &user_org.user_uuid, &conn) {
@@ -292,12 +304,13 @@ fn get_collection_users(org_id: String, coll_id: String, _headers: AdminHeaders,
.iter().map(|col_user| { .iter().map(|col_user| {
UserOrganization::find_by_user_and_org(&col_user.user_uuid, &org_id, &conn) UserOrganization::find_by_user_and_org(&col_user.user_uuid, &org_id, &conn)
.unwrap() .unwrap()
.to_json_collection_user_details(&col_user.read_only, &conn) .to_json_collection_user_details(col_user.read_only, &conn)
}).collect(); }).collect();
Ok(Json(json!({ Ok(Json(json!({
"Data": user_list, "Data": user_list,
"Object": "list" "Object": "list",
"ContinuationToken": null,
}))) })))
} }
@@ -315,22 +328,19 @@ fn get_org_details(data: OrgIdData, headers: Headers, conn: DbConn) -> JsonResul
Ok(Json(json!({ Ok(Json(json!({
"Data": ciphers_json, "Data": ciphers_json,
"Object": "list", "Object": "list",
"ContinuationToken": null,
}))) })))
} }
#[get("/organizations/<org_id>/users")] #[get("/organizations/<org_id>/users")]
fn get_org_users(org_id: String, headers: AdminHeaders, conn: DbConn) -> JsonResult { fn get_org_users(org_id: String, _headers: AdminHeaders, conn: DbConn) -> JsonResult {
match UserOrganization::find_by_user_and_org(&headers.user.uuid, &org_id, &conn) {
Some(_) => (),
None => err!("User isn't member of organization")
}
let users = UserOrganization::find_by_org(&org_id, &conn); let users = UserOrganization::find_by_org(&org_id, &conn);
let users_json: Vec<Value> = users.iter().map(|c| c.to_json_user_details(&conn)).collect(); let users_json: Vec<Value> = users.iter().map(|c| c.to_json_user_details(&conn)).collect();
Ok(Json(json!({ Ok(Json(json!({
"Data": users_json, "Data": users_json,
"Object": "list" "Object": "list",
"ContinuationToken": null,
}))) })))
} }
@@ -373,18 +383,42 @@ fn send_invite(org_id: String, data: JsonUpcase<InviteData>, headers: AdminHeade
         err!("Only Owners can invite Admins or Owners")
     }

-    for user_opt in data.Emails.iter().map(|email| User::find_by_mail(email, &conn)) {
-        match user_opt {
-            None => err!("User email does not exist"),
-            Some(user) => {
-                if UserOrganization::find_by_user_and_org(&user.uuid, &org_id, &conn).is_some() {
-                    err!("User already in organization")
+    for email in data.Emails.iter() {
+        let mut user_org_status = UserOrgStatus::Accepted as i32;
+        let user = match User::find_by_mail(&email, &conn) {
+            None => if CONFIG.invitations_allowed { // Invite user if that's enabled
+                let mut invitation = Invitation::new(email.clone());
+                match invitation.save(&conn) {
+                    Ok(()) => {
+                        let mut user = User::new(email.clone());
+                        if user.save(&conn).is_err() {
+                            err!("Failed to create placeholder for invited user")
+                        } else {
+                            user_org_status = UserOrgStatus::Invited as i32;
+                            user
+                        }
+                    }
+                    Err(_) => err!(format!("Failed to invite: {}", email))
                 }
+            } else {
+                err!(format!("User email does not exist: {}", email))
+            },
+            Some(user) => if UserOrganization::find_by_user_and_org(&user.uuid, &org_id, &conn).is_some() {
+                err!(format!("User already in organization: {}", email))
+            } else {
+                user
             }
+        };

+        // Don't create UserOrganization in virtual organization
+        if org_id != Organization::VIRTUAL_ID {
             let mut new_user = UserOrganization::new(user.uuid.clone(), org_id.clone());
             let access_all = data.AccessAll.unwrap_or(false);
             new_user.access_all = access_all;
             new_user.type_ = new_type;
+            new_user.status = user_org_status;

             // If no accessAll, add the collections received
             if !access_all {
@@ -400,7 +434,8 @@ fn send_invite(org_id: String, data: JsonUpcase<InviteData>, headers: AdminHeade
             }
         }

-        new_user.save(&conn);
+            if new_user.save(&conn).is_err() {
+                err!("Failed to add user to organization")
+            }
         }
     }
@@ -408,19 +443,15 @@ fn send_invite(org_id: String, data: JsonUpcase<InviteData>, headers: AdminHeade
     Ok(())
 }

-#[post("/organizations/<org_id>/users/<user_id>/confirm", data = "<data>")]
-fn confirm_invite(org_id: String, user_id: String, data: JsonUpcase<Value>, headers: AdminHeaders, conn: DbConn) -> EmptyResult {
+#[post("/organizations/<org_id>/users/<org_user_id>/confirm", data = "<data>")]
+fn confirm_invite(org_id: String, org_user_id: String, data: JsonUpcase<Value>, headers: AdminHeaders, conn: DbConn) -> EmptyResult {
     let data = data.into_inner().data;

-    let mut user_to_confirm = match UserOrganization::find_by_uuid(&user_id, &conn) {
+    let mut user_to_confirm = match UserOrganization::find_by_uuid_and_org(&org_user_id, &org_id, &conn) {
         Some(user) => user,
-        None => err!("Failed to find user membership")
+        None => err!("The specified user isn't a member of the organization")
     };

-    if user_to_confirm.org_uuid != org_id {
-        err!("The specified user isn't a member of the organization")
-    }
-
     if user_to_confirm.type_ != UserOrgType::User as i32 &&
         headers.org_user_type != UserOrgType::Owner as i32 {
         err!("Only Owners can confirm Admins or Owners")
@@ -436,22 +467,19 @@ fn confirm_invite(org_id: String, user_id: String, data: JsonUpcase<Value>, head
         None => err!("Invalid key provided")
     };

-    user_to_confirm.save(&conn);
-
-    Ok(())
+    match user_to_confirm.save(&conn) {
+        Ok(()) => Ok(()),
+        Err(_) => err!("Failed to add user to organization")
+    }
 }

-#[get("/organizations/<org_id>/users/<user_id>")]
-fn get_user(org_id: String, user_id: String, _headers: AdminHeaders, conn: DbConn) -> JsonResult {
-    let user = match UserOrganization::find_by_uuid(&user_id, &conn) {
+#[get("/organizations/<org_id>/users/<org_user_id>")]
+fn get_user(org_id: String, org_user_id: String, _headers: AdminHeaders, conn: DbConn) -> JsonResult {
+    let user = match UserOrganization::find_by_uuid_and_org(&org_user_id, &org_id, &conn) {
         Some(user) => user,
-        None => err!("Failed to find user membership")
+        None => err!("The specified user isn't a member of the organization")
     };
-
-    if user.org_uuid != org_id {
-        err!("The specified user isn't a member of the organization")
-    }

     Ok(Json(user.to_json_details(&conn)))
 }
@@ -464,13 +492,13 @@ struct EditUserData {
     AccessAll: bool,
 }

-#[put("/organizations/<org_id>/users/<user_id>", data = "<data>", rank = 1)]
-fn put_organization_user(org_id: String, user_id: String, data: JsonUpcase<EditUserData>, headers: AdminHeaders, conn: DbConn) -> EmptyResult {
-    edit_user(org_id, user_id, data, headers, conn)
+#[put("/organizations/<org_id>/users/<org_user_id>", data = "<data>", rank = 1)]
+fn put_organization_user(org_id: String, org_user_id: String, data: JsonUpcase<EditUserData>, headers: AdminHeaders, conn: DbConn) -> EmptyResult {
+    edit_user(org_id, org_user_id, data, headers, conn)
 }

-#[post("/organizations/<org_id>/users/<user_id>", data = "<data>", rank = 1)]
-fn edit_user(org_id: String, user_id: String, data: JsonUpcase<EditUserData>, headers: AdminHeaders, conn: DbConn) -> EmptyResult {
+#[post("/organizations/<org_id>/users/<org_user_id>", data = "<data>", rank = 1)]
+fn edit_user(org_id: String, org_user_id: String, data: JsonUpcase<EditUserData>, headers: AdminHeaders, conn: DbConn) -> EmptyResult {
     let data: EditUserData = data.into_inner().data;

     let new_type = match UserOrgType::from_str(&data.Type.into_string()) {
@@ -478,19 +506,22 @@ fn edit_user(org_id: String, user_id: String, data: JsonUpcase<EditUserData>, he
         None => err!("Invalid type")
     };

-    let mut user_to_edit = match UserOrganization::find_by_uuid(&user_id, &conn) {
+    let mut user_to_edit = match UserOrganization::find_by_uuid_and_org(&org_user_id, &org_id, &conn) {
         Some(user) => user,
         None => err!("The specified user isn't member of the organization")
     };

-    if new_type != UserOrgType::User as i32 &&
+    if new_type != user_to_edit.type_ as i32 && (
+        user_to_edit.type_ <= UserOrgType::Admin as i32 ||
+        new_type <= UserOrgType::Admin as i32
+    ) &&
         headers.org_user_type != UserOrgType::Owner as i32 {
-        err!("Only Owners can grant Admin or Owner type")
+        err!("Only Owners can grant and remove Admin or Owner privileges")
     }

-    if user_to_edit.type_ != UserOrgType::User as i32 &&
+    if user_to_edit.type_ == UserOrgType::Owner as i32 &&
         headers.org_user_type != UserOrgType::Owner as i32 {
-        err!("Only Owners can edit Admin or Owner")
+        err!("Only Owners can edit Owner users")
     }

     if user_to_edit.type_ == UserOrgType::Owner as i32 &&
@@ -530,14 +561,32 @@ fn edit_user(org_id: String, user_id: String, data: JsonUpcase<EditUserData>, he
         }
     }

-    user_to_edit.save(&conn);
-
-    Ok(())
+    match user_to_edit.save(&conn) {
+        Ok(()) => Ok(()),
+        Err(_) => err!("Failed to save user data")
+    }
 }
#[delete("/organizations/<org_id>/users/<user_id>")] #[delete("/organizations/<org_id>/users/<org_user_id>")]
fn delete_user(org_id: String, user_id: String, headers: AdminHeaders, conn: DbConn) -> EmptyResult { fn delete_user(org_id: String, org_user_id: String, headers: AdminHeaders, conn: DbConn) -> EmptyResult {
let user_to_delete = match UserOrganization::find_by_uuid(&user_id, &conn) { // We're deleting user in virtual Organization. Delete User, not UserOrganization
if org_id == Organization::VIRTUAL_ID {
match User::find_by_uuid(&org_user_id, &conn) {
Some(user_to_delete) => {
if user_to_delete.uuid == headers.user.uuid {
err!("Delete your account in the account settings")
} else {
match user_to_delete.delete(&conn) {
Ok(()) => return Ok(()),
Err(_) => err!("Failed to delete user - likely because it's the only owner of organization")
}
}
},
None => err!("User not found")
}
}
let user_to_delete = match UserOrganization::find_by_uuid_and_org(&org_user_id, &org_id, &conn) {
Some(user) => user, Some(user) => user,
None => err!("User to delete isn't member of the organization") None => err!("User to delete isn't member of the organization")
}; };
@@ -564,7 +613,91 @@ fn delete_user(org_id: String, user_id: String, headers: AdminHeaders, conn: DbC
} }
} }
#[post("/organizations/<org_id>/users/<user_id>/delete")] #[post("/organizations/<org_id>/users/<org_user_id>/delete")]
fn post_delete_user(org_id: String, user_id: String, headers: AdminHeaders, conn: DbConn) -> EmptyResult { fn post_delete_user(org_id: String, org_user_id: String, headers: AdminHeaders, conn: DbConn) -> EmptyResult {
delete_user(org_id, user_id, headers, conn) delete_user(org_id, org_user_id, headers, conn)
}
#[post("/organizations/<_org_id>/users/<_org_user_id>/reinvite")]
fn post_reinvite_user(_org_id: String, _org_user_id: String, _headers: AdminHeaders, _conn: DbConn) -> EmptyResult {
err!("This functionality is not implemented. The user needs to manually register before they can be accepted into the organization.")
}
use super::ciphers::CipherData;
use super::ciphers::update_cipher_from_data;
#[derive(Deserialize)]
#[allow(non_snake_case)]
struct ImportData {
Ciphers: Vec<CipherData>,
Collections: Vec<NewCollectionData>,
CollectionRelationships: Vec<RelationsData>,
}
#[derive(Deserialize)]
#[allow(non_snake_case)]
struct RelationsData {
// Cipher index
Key: usize,
// Collection index
Value: usize,
}
#[post("/ciphers/import-organization?<query>", data = "<data>")]
fn post_org_import(query: OrgIdData, data: JsonUpcase<ImportData>, headers: Headers, conn: DbConn, ws: State<WebSocketUsers>) -> EmptyResult {
let data: ImportData = data.into_inner().data;
let org_id = query.organizationId;
let org_user = match UserOrganization::find_by_user_and_org(&headers.user.uuid, &org_id, &conn) {
Some(user) => user,
None => err!("User is not part of the organization")
};
if org_user.type_ > UserOrgType::Admin as i32 {
err!("Only admins or owners can import into an organization")
}
// Read and create the collections
let collections: Vec<_> = data.Collections.into_iter().map(|coll| {
let mut collection = Collection::new(org_id.clone(), coll.Name);
if collection.save(&conn).is_err() {
err!("Failed to create Collection");
}
Ok(collection)
}).collect();
// Read the relations between collections and ciphers
let mut relations = Vec::new();
for relation in data.CollectionRelationships {
relations.push((relation.Key, relation.Value));
}
// Read and create the ciphers
let ciphers: Vec<_> = data.Ciphers.into_iter().map(|cipher_data| {
let mut cipher = Cipher::new(cipher_data.Type, cipher_data.Name.clone());
update_cipher_from_data(&mut cipher, cipher_data, &headers, false, &conn, &ws, UpdateType::SyncCipherCreate).ok();
cipher
}).collect();
// Assign the collections
for (cipher_index, coll_index) in relations {
let cipher_id = &ciphers[cipher_index].uuid;
let coll = &collections[coll_index];
let coll_id = match coll {
Ok(coll) => coll.uuid.as_str(),
Err(_) => err!("Failed to assign to collection")
};
match CollectionCipher::save(cipher_id, coll_id, &conn) {
Ok(()) => (),
Err(_) => err!("Failed to add cipher to collection")
};
}
let mut user = headers.user;
match user.update_revision(&conn) {
Ok(()) => Ok(()),
Err(_) => err!("Failed to update the revision, please log out and log back in to finish import.")
}
}

View File

@@ -19,7 +19,8 @@ fn get_twofactor(headers: Headers, conn: DbConn) -> JsonResult {
     Ok(Json(json!({
         "Data": twofactors_json,
-        "Object": "list"
+        "Object": "list",
+        "ContinuationToken": null,
     })))
 }
@@ -74,9 +75,10 @@ fn recover(data: JsonUpcase<RecoverTwoFactor>, conn: DbConn) -> JsonResult {
     // Remove the recovery code, not needed without twofactors
     user.totp_recover = None;
-    user.save(&conn);
-
-    Ok(Json(json!({})))
+    match user.save(&conn) {
+        Ok(()) => Ok(Json(json!({}))),
+        Err(_) => err!("Failed to remove the user's two factor recovery code")
+    }
 }

 #[derive(Deserialize)]
@@ -216,7 +218,9 @@ fn _generate_recover_code(user: &mut User, conn: &DbConn) {
     if user.totp_recover.is_none() {
         let totp_recover = BASE32.encode(&crypto::get_random(vec![0u8; 20]));
         user.totp_recover = Some(totp_recover);
-        user.save(conn);
+        if user.save(conn).is_err() {
+            println!("Error: Failed to save the user's two factor recovery code")
+        }
     }
 }
@@ -248,24 +252,31 @@ fn generate_u2f(data: JsonUpcase<PasswordData>, headers: Headers, conn: DbConn)
     let user_uuid = &headers.user.uuid;

     let u2f_type = TwoFactorType::U2f as i32;
-    let register_type = TwoFactorType::U2fRegisterChallenge;
-    let (enabled, challenge) = match TwoFactor::find_by_user_and_type(user_uuid, u2f_type, &conn) {
-        Some(_) => (true, String::new()),
-        None => {
-            let c = _create_u2f_challenge(user_uuid, register_type, &conn);
-            (false, c.challenge)
-        }
-    };
+    let enabled = TwoFactor::find_by_user_and_type(user_uuid, u2f_type, &conn).is_some();

     Ok(Json(json!({
         "Enabled": enabled,
-        "Challenge": {
+        "Object": "twoFactorU2f"
+    })))
+}
+
+#[post("/two-factor/get-u2f-challenge", data = "<data>")]
+fn generate_u2f_challenge(data: JsonUpcase<PasswordData>, headers: Headers, conn: DbConn) -> JsonResult {
+    let data: PasswordData = data.into_inner().data;
+
+    if !headers.user.check_valid_password(&data.MasterPasswordHash) {
+        err!("Invalid password");
+    }
+
+    let user_uuid = &headers.user.uuid;
+    let challenge = _create_u2f_challenge(user_uuid, TwoFactorType::U2fRegisterChallenge, &conn).challenge;
+
+    Ok(Json(json!({
         "UserId": headers.user.uuid,
         "AppId": APP_ID.to_string(),
         "Challenge": challenge,
         "Version": U2F_VERSION,
-        },
-        "Object": "twoFactorU2f"
     })))
 }
@@ -293,7 +304,7 @@ impl RegisterResponseCopy {
         RegisterResponse {
             registration_data: self.registration_data,
             version: self.version,
-            challenge: challenge,
+            challenge,
             client_data: self.client_data,
         }
     }

View File

@@ -18,7 +18,7 @@ fn icon(domain: String) -> Content<Vec<u8>> {
     let icon_type = ContentType::new("image", "x-icon");

     // Validate the domain to avoid directory traversal attacks
-    if domain.contains("/") || domain.contains("..") {
+    if domain.contains('/') || domain.contains("..") {
         return Content(icon_type, get_fallback_icon());
     }

View File

@@ -1,4 +1,5 @@
 use std::collections::HashMap;
+use std::net::{IpAddr, Ipv4Addr, SocketAddr};

 use rocket::request::{self, Form, FormItems, FromForm, FromRequest, Request};
 use rocket::{Outcome, Route};
@@ -21,12 +22,12 @@ pub fn routes() -> Vec<Route> {
 }

 #[post("/connect/token", data = "<connect_data>")]
-fn login(connect_data: Form<ConnectData>, device_type: DeviceType, conn: DbConn) -> JsonResult {
+fn login(connect_data: Form<ConnectData>, device_type: DeviceType, conn: DbConn, socket: Option<SocketAddr>) -> JsonResult {
     let data = connect_data.get();

     match data.grant_type {
         GrantType::RefreshToken => _refresh_login(data, device_type, conn),
-        GrantType::Password => _password_login(data, device_type, conn),
+        GrantType::Password => _password_login(data, device_type, conn, socket),
     }
 }
@@ -45,19 +46,26 @@ fn _refresh_login(data: &ConnectData, _device_type: DeviceType, conn: DbConn) ->
     let orgs = UserOrganization::find_by_user(&user.uuid, &conn);

     let (access_token, expires_in) = device.refresh_tokens(&user, orgs);
-    device.save(&conn);
-
-    Ok(Json(json!({
+    match device.save(&conn) {
+        Ok(()) => Ok(Json(json!({
             "access_token": access_token,
             "expires_in": expires_in,
             "token_type": "Bearer",
             "refresh_token": device.refresh_token,
             "Key": user.key,
             "PrivateKey": user.private_key,
-    })))
+        }))),
+        Err(_) => err!("Failed to add device to user")
+    }
 }

-fn _password_login(data: &ConnectData, device_type: DeviceType, conn: DbConn) -> JsonResult {
+fn _password_login(data: &ConnectData, device_type: DeviceType, conn: DbConn, remote: Option<SocketAddr>) -> JsonResult {
+    // Get the ip for error reporting
+    let ip = match remote {
+        Some(ip) => ip.ip(),
+        None => IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)),
+    };
+
     // Validate scope
     let scope = data.get("scope");
     if scope != "api offline_access" {
@@ -68,13 +76,19 @@ fn _password_login(data: &ConnectData, device_type: DeviceType, conn: DbConn) ->
     let username = data.get("username");
     let user = match User::find_by_mail(username, &conn) {
         Some(user) => user,
-        None => err!("Username or password is incorrect. Try again."),
+        None => err!(format!(
+            "Username or password is incorrect. Try again. IP: {}. Username: {}.",
+            ip, username
+        )),
     };

     // Check password
     let password = data.get("password");
     if !user.check_valid_password(password) {
-        err!("Username or password is incorrect. Try again.")
+        err!(format!(
+            "Username or password is incorrect. Try again. IP: {}. Username: {}.",
+            ip, username
+        ))
     }

     // Let's only use the header and ignore the 'devicetype' parameter
@@ -94,12 +108,14 @@ fn _password_login(data: &ConnectData, device_type: DeviceType, conn: DbConn) ->
         Some(device) => {
             // Check if valid device
             if device.user_uuid != user.uuid {
-                device.delete(&conn);
-                err!("Device is not owned by user")
+                match device.delete(&conn) {
+                    Ok(()) => Device::new(device_id, user.uuid.clone(), device_name, device_type_num),
+                    Err(_) => err!("Tried to delete device not owned by user, but failed")
+                }
+            } else {
+                device
             }
-
-            device
         }
         None => {
             // Create new device
             Device::new(device_id, user.uuid.clone(), device_name, device_type_num)
@@ -113,7 +129,9 @@ fn _password_login(data: &ConnectData, device_type: DeviceType, conn: DbConn) ->
     let orgs = UserOrganization::find_by_user(&user.uuid, &conn);

     let (access_token, expires_in) = device.refresh_tokens(&user, orgs);
-    device.save(&conn);
+    if device.save(&conn).is_err() {
+        err!("Failed to add device to user")
+    }

     let mut result = json!({
         "access_token": access_token,
@@ -145,11 +163,11 @@ fn twofactor_auth(
     let providers: Vec<_> = twofactors.iter().map(|tf| tf.type_).collect();

     // No twofactor token if twofactor is disabled
-    if twofactors.len() == 0 {
+    if twofactors.is_empty() {
         return Ok(None);
     }

-    let provider = match util::parse_option_string(data.get_opt("twoFactorProvider")) {
+    let provider = match util::try_parse_string(data.get_opt("twoFactorProvider")) {
         Some(provider) => provider,
         None => providers[0], // If we aren't given a two factor provider, assume the first one
     };
@@ -194,7 +212,7 @@ fn twofactor_auth(
         _ => err!("Invalid two factor provider"),
     }

-    if util::parse_option_string(data.get_opt("twoFactorRemember")).unwrap_or(0) == 1 {
+    if util::try_parse_string_or(data.get_opt("twoFactorRemember"), 0) == 1 {
         Ok(Some(device.refresh_twofactor_remember()))
     } else {
         device.delete_twofactor_remember();
@@ -261,7 +279,7 @@ impl<'a, 'r> FromRequest<'a, 'r> for DeviceType {
     fn from_request(request: &'a Request<'r>) -> request::Outcome<Self, Self::Error> {
         let headers = request.headers();
         let type_opt = headers.get_one("Device-Type");
-        let type_num = util::parse_option_string(type_opt).unwrap_or(0);
+        let type_num = util::try_parse_string_or(type_opt, 0);

         Outcome::Success(DeviceType(type_num))
     }

View File

@@ -2,11 +2,14 @@ pub(crate) mod core;
 mod icons;
 mod identity;
 mod web;
+mod notifications;

 pub use self::core::routes as core_routes;
 pub use self::icons::routes as icons_routes;
 pub use self::identity::routes as identity_routes;
 pub use self::web::routes as web_routes;
+pub use self::notifications::routes as notifications_routes;
+pub use self::notifications::{start_notification_server, WebSocketUsers, UpdateType};

 use rocket::response::status::BadRequest;
 use rocket_contrib::Json;

373
src/api/notifications.rs Normal file
View File

@@ -0,0 +1,373 @@
use rocket::Route;
use rocket_contrib::Json;
use serde_json::Value as JsonValue;
use api::JsonResult;
use auth::Headers;
use db::DbConn;
use CONFIG;
pub fn routes() -> Vec<Route> {
routes![negotiate, websockets_err]
}
#[get("/hub")]
fn websockets_err() -> JsonResult {
err!("'/notifications/hub' should be proxied towards the websocket server, otherwise notifications will not work. Go to the README for more info.")
}
#[post("/hub/negotiate")]
fn negotiate(_headers: Headers, _conn: DbConn) -> JsonResult {
use crypto;
use data_encoding::BASE64URL;
let conn_id = BASE64URL.encode(&crypto::get_random(vec![0u8; 16]));
let mut available_transports: Vec<JsonValue> = Vec::new();
if CONFIG.websocket_enabled {
available_transports.push(json!({"transport":"WebSockets", "transferFormats":["Text","Binary"]}));
}
// TODO: Implement transports
// Rocket WS support: https://github.com/SergioBenitez/Rocket/issues/90
// Rocket SSE support: https://github.com/SergioBenitez/Rocket/issues/33
// {"transport":"ServerSentEvents", "transferFormats":["Text"]},
// {"transport":"LongPolling", "transferFormats":["Text","Binary"]}
Ok(Json(json!({
"connectionId": conn_id,
"availableTransports": available_transports
})))
}
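// Illustrative note (not part of the original file): with WEBSOCKET_ENABLED
// turned on, the negotiate handler above answers with roughly
//
//     {
//         "connectionId": "<16 random bytes, base64url-encoded>",
//         "availableTransports": [
//             { "transport": "WebSockets", "transferFormats": ["Text", "Binary"] }
//         ]
//     }
//
// which is the shape a SignalR client expects before it opens /notifications/hub.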
///
/// Websockets server
///
use std::sync::Arc;
use std::thread;
use ws::{self, util::Token, Factory, Handler, Handshake, Message, Sender, WebSocket};
use chashmap::CHashMap;
use chrono::NaiveDateTime;
use serde_json::from_str;
use db::models::{Cipher, Folder, User};
use rmpv::Value;
fn serialize(val: Value) -> Vec<u8> {
use rmpv::encode::write_value;
let mut buf = Vec::new();
write_value(&mut buf, &val).expect("Error encoding MsgPack");
// Add size bytes at the start
// Extracted from BinaryMessageFormat.js
let mut size: usize = buf.len();
let mut len_buf: Vec<u8> = Vec::new();
loop {
let mut size_part = size & 0x7f;
size >>= 7;
if size > 0 {
size_part |= 0x80;
}
len_buf.push(size_part as u8);
if size == 0 {
break;
}
}
len_buf.append(&mut buf);
len_buf
}
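// Illustrative sketch (not part of the original file): a matching decoder for
// the little-endian 7-bit length prefix written above, the scheme used by
// SignalR's BinaryMessageFormat.js. Returns the payload length and the number
// of prefix bytes consumed.
fn read_msgpack_len(buf: &[u8]) -> (usize, usize) {
    let mut len: usize = 0;
    let mut shift = 0;
    for (i, byte) in buf.iter().enumerate() {
        len |= ((byte & 0x7f) as usize) << shift;
        if byte & 0x80 == 0 {
            return (len, i + 1); // High bit clear: last prefix byte
        }
        shift += 7;
    }
    panic!("Unterminated length prefix");
}
// For example, a 300-byte payload is prefixed with [0xAC, 0x02]:
// 300 & 0x7f = 0x2C, plus the continuation bit, gives 0xAC; then 300 >> 7 = 0x02.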
fn serialize_date(date: NaiveDateTime) -> Value {
let seconds: i64 = date.timestamp();
let nanos: i64 = date.timestamp_subsec_nanos() as i64;
let timestamp = nanos << 34 | seconds;
use byteorder::{BigEndian, WriteBytesExt};
let mut bs = [0u8; 8];
bs.as_mut()
.write_i64::<BigEndian>(timestamp)
.expect("Unable to write");
// -1 is Timestamp
// https://github.com/msgpack/msgpack/blob/master/spec.md#timestamp-extension-type
Value::Ext(-1, bs.to_vec())
}
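// Illustrative sketch (not part of the original file): inverting the 64-bit
// msgpack "timestamp 64" layout produced above, where the 30-bit nanosecond
// count sits in the high bits and the 34-bit second count in the low bits.
fn split_timestamp(ts: i64) -> (i64, i64) {
    let seconds = ts & ((1i64 << 34) - 1); // Low 34 bits
    let nanos = ts >> 34;                  // High 30 bits
    (seconds, nanos)
}
// e.g. split_timestamp((123 << 34) | 1_500_000_000) == (1_500_000_000, 123)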
fn convert_option<T: Into<Value>>(option: Option<T>) -> Value {
match option {
Some(a) => a.into(),
None => Value::Nil,
}
}
// Server WebSocket handler
pub struct WSHandler {
out: Sender,
user_uuid: Option<String>,
users: WebSocketUsers,
}
const RECORD_SEPARATOR: u8 = 0x1e;
const INITIAL_RESPONSE: [u8; 3] = [0x7b, 0x7d, RECORD_SEPARATOR]; // {, }, <RS>
#[derive(Deserialize)]
struct InitialMessage {
protocol: String,
version: i32,
}
const PING_MS: u64 = 15_000;
const PING: Token = Token(1);
impl Handler for WSHandler {
fn on_open(&mut self, hs: Handshake) -> ws::Result<()> {
// TODO: Improve this split
let path = hs.request.resource();
let mut query_split: Vec<_> = path.split('?').nth(1).unwrap().split('&').collect();
query_split.sort();
let access_token = &query_split[0][13..];
let _id = &query_split[1][3..];
// Validate the user
use auth;
let claims = match auth::decode_jwt(access_token) {
Ok(claims) => claims,
Err(_) => {
return Err(ws::Error::new(
ws::ErrorKind::Internal,
"Invalid access token provided",
))
}
};
// Assign the user to the handler
let user_uuid = claims.sub;
self.user_uuid = Some(user_uuid.clone());
// Add the current Sender to the user list
let handler_insert = self.out.clone();
let handler_update = self.out.clone();
self.users.map.upsert(
user_uuid,
|| vec![handler_insert],
|ref mut v| v.push(handler_update),
);
// Schedule a ping to keep the connection alive
self.out.timeout(PING_MS, PING)
}
fn on_message(&mut self, msg: Message) -> ws::Result<()> {
println!("Server got message '{}'. ", msg);
if let Message::Text(text) = msg.clone() {
let json = &text[..text.len() - 1]; // Remove last char
if let Ok(InitialMessage { protocol, version }) = from_str::<InitialMessage>(json) {
if &protocol == "messagepack" && version == 1 {
return self.out.send(&INITIAL_RESPONSE[..]); // Respond to initial message
}
}
}
// If it's not the initial message, just echo the message
self.out.send(msg)
}
fn on_timeout(&mut self, event: Token) -> ws::Result<()> {
if event == PING {
// send ping
self.out.send(create_ping())?;
// reschedule the timeout
self.out.timeout(PING_MS, PING)
} else {
Err(ws::Error::new(
ws::ErrorKind::Internal,
"Invalid timeout token provided",
))
}
}
}
struct WSFactory {
pub users: WebSocketUsers,
}
impl WSFactory {
pub fn init() -> Self {
WSFactory {
users: WebSocketUsers {
map: Arc::new(CHashMap::new()),
},
}
}
}
impl Factory for WSFactory {
type Handler = WSHandler;
fn connection_made(&mut self, out: Sender) -> Self::Handler {
println!("WS: Connection made");
WSHandler {
out,
user_uuid: None,
users: self.users.clone(),
}
}
fn connection_lost(&mut self, handler: Self::Handler) {
println!("WS: Connection lost");
// Remove handler
let user_uuid = &handler.user_uuid.unwrap();
if let Some(mut user_conn) = self.users.map.get_mut(user_uuid) {
user_conn.remove_item(&handler.out);
}
}
}
#[derive(Clone)]
pub struct WebSocketUsers {
pub map: Arc<CHashMap<String, Vec<Sender>>>,
}
impl WebSocketUsers {
fn send_update(&self, user_uuid: &String, data: Vec<u8>) -> ws::Result<()> {
if let Some(user) = self.map.get(user_uuid) {
for sender in user.iter() {
sender.send(data.clone())?;
}
}
Ok(())
}
// NOTE: The last modified date needs to be updated before calling these methods
#[allow(dead_code)]
pub fn send_user_update(&self, ut: UpdateType, user: &User) {
let data = create_update(
vec![
("UserId".into(), user.uuid.clone().into()),
("Date".into(), serialize_date(user.updated_at)),
],
ut,
);
self.send_update(&user.uuid.clone(), data).ok();
}
pub fn send_folder_update(&self, ut: UpdateType, folder: &Folder) {
let data = create_update(
vec![
("Id".into(), folder.uuid.clone().into()),
("UserId".into(), folder.user_uuid.clone().into()),
("RevisionDate".into(), serialize_date(folder.updated_at)),
],
ut,
);
self.send_update(&folder.user_uuid, data).ok();
}
pub fn send_cipher_update(&self, ut: UpdateType, cipher: &Cipher, user_uuids: &Vec<String>) {
let user_uuid = convert_option(cipher.user_uuid.clone());
let org_uuid = convert_option(cipher.organization_uuid.clone());
let data = create_update(
vec![
("Id".into(), cipher.uuid.clone().into()),
("UserId".into(), user_uuid),
("OrganizationId".into(), org_uuid),
("CollectionIds".into(), Value::Nil),
("RevisionDate".into(), serialize_date(cipher.updated_at)),
],
ut,
);
for uuid in user_uuids {
self.send_update(&uuid, data.clone()).ok();
}
}
}
/* Message Structure
[
1, // MessageType.Invocation
{}, // Headers
null, // InvocationId
"ReceiveMessage", // Target
[ // Arguments
{
"ContextId": "app_id",
"Type": ut as i32,
"Payload": {}
}
]
]
*/
fn create_update(payload: Vec<(Value, Value)>, ut: UpdateType) -> Vec<u8> {
use rmpv::Value as V;
let value = V::Array(vec![
1.into(),
V::Array(vec![]),
V::Nil,
"ReceiveMessage".into(),
V::Array(vec![V::Map(vec![
("ContextId".into(), "app_id".into()),
("Type".into(), (ut as i32).into()),
("Payload".into(), payload.into()),
])]),
]);
serialize(value)
}
fn create_ping() -> Vec<u8> {
serialize(Value::Array(vec![6.into()]))
}
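// Illustrative check (not part of the original file): MsgPack encodes [6] as
// [0x91, 0x06] (one-element fixarray, positive fixint 6), and the length
// prefix for a two-byte payload is the single byte 0x02, so the ping frame
// should come out as [0x02, 0x91, 0x06].
#[test]
fn ping_frame_layout() {
    assert_eq!(create_ping(), vec![0x02, 0x91, 0x06]);
}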
#[allow(dead_code)]
pub enum UpdateType {
SyncCipherUpdate = 0,
SyncCipherCreate = 1,
SyncLoginDelete = 2,
SyncFolderDelete = 3,
SyncCiphers = 4,
SyncVault = 5,
SyncOrgKeys = 6,
SyncFolderCreate = 7,
SyncFolderUpdate = 8,
SyncCipherDelete = 9,
SyncSettings = 10,
LogOut = 11,
}
pub fn start_notification_server() -> WebSocketUsers {
let factory = WSFactory::init();
let users = factory.users.clone();
if CONFIG.websocket_enabled {
thread::spawn(move || {
WebSocket::new(factory)
.unwrap()
.listen(&CONFIG.websocket_url)
.unwrap();
});
}
users
}

View File

@@ -95,7 +95,7 @@ use rocket::Outcome;
 use rocket::request::{self, Request, FromRequest};

 use db::DbConn;
-use db::models::{User, UserOrganization, UserOrgType, UserOrgStatus, Device};
+use db::models::{User, Organization, UserOrganization, UserOrgType, UserOrgStatus, Device};

 pub struct Headers {
     pub host: String,
@@ -212,7 +212,13 @@ impl<'a, 'r> FromRequest<'a, 'r> for OrgHeaders {
                     err_handler!("The current user isn't confirmed member of the organization")
                 }
             }
-            None => err_handler!("The current user isn't member of the organization")
+            None => {
+                if headers.user.is_server_admin() && org_id == Organization::VIRTUAL_ID {
+                    UserOrganization::new_virtual(headers.user.uuid.clone(), UserOrgType::Owner, UserOrgStatus::Confirmed)
+                } else {
+                    err_handler!("The current user isn't member of the organization")
+                }
+            }
         };

         Outcome::Success(Self{

View File

@@ -53,13 +53,11 @@ use db::schema::attachments;
 /// Database methods
 impl Attachment {
-    pub fn save(&self, conn: &DbConn) -> bool {
-        match diesel::replace_into(attachments::table)
+    pub fn save(&self, conn: &DbConn) -> QueryResult<()> {
+        diesel::replace_into(attachments::table)
             .values(self)
-            .execute(&**conn) {
-            Ok(1) => true, // One row inserted
-            _ => false,
-        }
+            .execute(&**conn)
+            .and(Ok(()))
     }

     pub fn delete(self, conn: &DbConn) -> QueryResult<()> {
@@ -80,7 +78,7 @@ impl Attachment {
             println!("ERROR: Failed with 10 retries");
             return Err(err)
         } else {
-            retries = retries - 1;
+            retries -= 1;
             println!("Had to retry! Retries left: {}", retries);
             thread::sleep(time::Duration::from_millis(500));
             continue

View File

@@ -32,6 +32,7 @@ pub struct Cipher {
     pub data: String,

     pub favorite: bool,
+    pub password_history: Option<String>,
 }

 /// Local methods
@@ -55,6 +56,7 @@ impl Cipher {
             fields: None,

             data: String::new(),
+            password_history: None,
         }
     }
 }
@@ -78,6 +80,10 @@
             serde_json::from_str(fields).unwrap()
         } else { JsonValue::Null };

+        let password_history_json: JsonValue = if let Some(ref password_history) = self.password_history {
+            serde_json::from_str(password_history).unwrap()
+        } else { JsonValue::Null };
+
         let mut data_json: JsonValue = serde_json::from_str(&self.data).unwrap();

         // TODO: ******* Backwards compat start **********
@@ -108,6 +114,8 @@
             "Object": "cipher",
             "Edit": true,
+
+            "PasswordHistory": password_history_json,
         });

         let key = match self.type_ {
@@ -122,34 +130,38 @@
         json_object
     }

-    pub fn update_users_revision(&self, conn: &DbConn) {
+    pub fn update_users_revision(&self, conn: &DbConn) -> Vec<String> {
+        let mut user_uuids = Vec::new();
         match self.user_uuid {
-            Some(ref user_uuid) => User::update_uuid_revision(&user_uuid, conn),
+            Some(ref user_uuid) => {
+                User::update_uuid_revision(&user_uuid, conn);
+                user_uuids.push(user_uuid.clone())
+            },
             None => { // Belongs to Organization, need to update affected users
                 if let Some(ref org_uuid) = self.organization_uuid {
                     UserOrganization::find_by_cipher_and_org(&self.uuid, &org_uuid, conn)
                         .iter()
                         .for_each(|user_org| {
-                            User::update_uuid_revision(&user_org.user_uuid, conn)
+                            User::update_uuid_revision(&user_org.user_uuid, conn);
+                            user_uuids.push(user_org.user_uuid.clone())
                         });
                 }
             }
         };
+
+        user_uuids
     }

-    pub fn save(&mut self, conn: &DbConn) -> bool {
+    pub fn save(&mut self, conn: &DbConn) -> QueryResult<()> {
         self.update_users_revision(conn);
         self.updated_at = Utc::now().naive_utc();

-        match diesel::replace_into(ciphers::table)
+        diesel::replace_into(ciphers::table)
             .values(&*self)
-            .execute(&**conn) {
-            Ok(1) => true, // One row inserted
-            _ => false,
-        }
+            .execute(&**conn)
+            .and(Ok(()))
     }

-    pub fn delete(self, conn: &DbConn) -> QueryResult<()> {
+    pub fn delete(&self, conn: &DbConn) -> QueryResult<()> {
         self.update_users_revision(conn);

         FolderCipher::delete_all_by_cipher(&self.uuid, &conn)?;
@@ -158,7 +170,7 @@
         diesel::delete(
             ciphers::table.filter(
-                ciphers::uuid.eq(self.uuid)
+                ciphers::uuid.eq(&self.uuid)
             )
         ).execute(&**conn).and(Ok(()))
     }
@@ -170,6 +182,13 @@
         Ok(())
     }

+    pub fn delete_all_by_user(user_uuid: &str, conn: &DbConn) -> QueryResult<()> {
+        for cipher in Self::find_owned_by_user(user_uuid, &conn) {
+            cipher.delete(&conn)?;
+        }
+        Ok(())
+    }
+
     pub fn move_to_folder(&self, folder_uuid: Option<String>, user_uuid: &str, conn: &DbConn) -> Result<(), &str> {
         match self.get_folder_uuid(&user_uuid, &conn) {
             None => {
@@ -348,6 +367,6 @@
             )
         ))
         .select(ciphers_collections::collection_uuid)
-        .load::<String>(&**conn).unwrap_or(vec![])
+        .load::<String>(&**conn).unwrap_or_default()
     }
 }

View File

@@ -42,13 +42,18 @@ use db::schema::*;
 /// Database methods
 impl Collection {
-    pub fn save(&mut self, conn: &DbConn) -> bool {
-        match diesel::replace_into(collections::table)
+    pub fn save(&mut self, conn: &DbConn) -> QueryResult<()> {
+        // Update affected users revision
+        UserOrganization::find_by_collection_and_org(&self.uuid, &self.org_uuid, conn)
+            .iter()
+            .for_each(|user_org| {
+                User::update_uuid_revision(&user_org.user_uuid, conn);
+            });
+
+        diesel::replace_into(collections::table)
             .values(&*self)
-            .execute(&**conn) {
-            Ok(1) => true, // One row inserted
-            _ => false,
-        }
+            .execute(&**conn)
+            .and(Ok(()))
     }

     pub fn delete(self, conn: &DbConn) -> QueryResult<()> {
@@ -254,25 +259,19 @@ pub struct CollectionCipher {
 /// Database methods
 impl CollectionCipher {
-    pub fn save(cipher_uuid: &str, collection_uuid: &str, conn: &DbConn) -> bool {
-        match diesel::replace_into(ciphers_collections::table)
+    pub fn save(cipher_uuid: &str, collection_uuid: &str, conn: &DbConn) -> QueryResult<()> {
+        diesel::replace_into(ciphers_collections::table)
             .values((
                 ciphers_collections::cipher_uuid.eq(cipher_uuid),
                 ciphers_collections::collection_uuid.eq(collection_uuid),
-            )).execute(&**conn) {
-            Ok(1) => true, // One row inserted
-            _ => false,
-        }
+            )).execute(&**conn).and(Ok(()))
     }

-    pub fn delete(cipher_uuid: &str, collection_uuid: &str, conn: &DbConn) -> bool {
-        match diesel::delete(ciphers_collections::table
+    pub fn delete(cipher_uuid: &str, collection_uuid: &str, conn: &DbConn) -> QueryResult<()> {
+        diesel::delete(ciphers_collections::table
             .filter(ciphers_collections::cipher_uuid.eq(cipher_uuid))
             .filter(ciphers_collections::collection_uuid.eq(collection_uuid)))
-            .execute(&**conn) {
-            Ok(1) => true, // One row deleted
-            _ => false,
-        }
+            .execute(&**conn).and(Ok(()))
     }

     pub fn delete_all_by_cipher(cipher_uuid: &str, conn: &DbConn) -> QueryResult<()> {

View File

@@ -112,24 +112,24 @@ use db::schema::devices;
 /// Database methods
 impl Device {
-    pub fn save(&mut self, conn: &DbConn) -> bool {
+    pub fn save(&mut self, conn: &DbConn) -> QueryResult<()> {
         self.updated_at = Utc::now().naive_utc();

-        match diesel::replace_into(devices::table)
-            .values(&*self)
-            .execute(&**conn) {
-            Ok(1) => true, // One row inserted
-            _ => false,
-        }
+        diesel::replace_into(devices::table)
+            .values(&*self).execute(&**conn).and(Ok(()))
     }

-    pub fn delete(self, conn: &DbConn) -> bool {
-        match diesel::delete(devices::table.filter(
-            devices::uuid.eq(self.uuid)))
-            .execute(&**conn) {
-            Ok(1) => true, // One row deleted
-            _ => false,
-        }
+    pub fn delete(self, conn: &DbConn) -> QueryResult<()> {
+        diesel::delete(devices::table.filter(
+            devices::uuid.eq(self.uuid)
+        )).execute(&**conn).and(Ok(()))
+    }
+
+    pub fn delete_all_by_user(user_uuid: &str, conn: &DbConn) -> QueryResult<()> {
+        for device in Self::find_by_user(user_uuid, &conn) {
+            device.delete(&conn)?;
+        }
+        Ok(())
     }

     pub fn find_by_uuid(uuid: &str, conn: &DbConn) -> Option<Self> {

View File

@@ -70,29 +70,32 @@ use db::schema::{folders, folders_ciphers};
 /// Database methods
 impl Folder {
-    pub fn save(&mut self, conn: &DbConn) -> bool {
+    pub fn save(&mut self, conn: &DbConn) -> QueryResult<()> {
         User::update_uuid_revision(&self.user_uuid, conn);
         self.updated_at = Utc::now().naive_utc();

-        match diesel::replace_into(folders::table)
-            .values(&*self)
-            .execute(&**conn) {
-            Ok(1) => true, // One row inserted
-            _ => false,
-        }
+        diesel::replace_into(folders::table)
+            .values(&*self).execute(&**conn).and(Ok(()))
     }

-    pub fn delete(self, conn: &DbConn) -> QueryResult<()> {
+    pub fn delete(&self, conn: &DbConn) -> QueryResult<()> {
         User::update_uuid_revision(&self.user_uuid, conn);
         FolderCipher::delete_all_by_folder(&self.uuid, &conn)?;

         diesel::delete(
             folders::table.filter(
-                folders::uuid.eq(self.uuid)
+                folders::uuid.eq(&self.uuid)
             )
         ).execute(&**conn).and(Ok(()))
     }

+    pub fn delete_all_by_user(user_uuid: &str, conn: &DbConn) -> QueryResult<()> {
+        for folder in Self::find_by_user(user_uuid, &conn) {
+            folder.delete(&conn)?;
+        }
+        Ok(())
+    }
+
     pub fn find_by_uuid(uuid: &str, conn: &DbConn) -> Option<Self> {
         folders::table
             .filter(folders::uuid.eq(uuid))

View File

@@ -12,7 +12,7 @@ pub use self::attachment::Attachment;
 pub use self::cipher::Cipher;
 pub use self::device::Device;
 pub use self::folder::{Folder, FolderCipher};
-pub use self::user::User;
+pub use self::user::{User, Invitation};
 pub use self::organization::Organization;
 pub use self::organization::{UserOrganization, UserOrgStatus, UserOrgType};
 pub use self::collection::{Collection, CollectionUser, CollectionCipher};

View File

@@ -1,7 +1,7 @@
 use serde_json::Value as JsonValue;
 use uuid::Uuid;

-use super::{User, CollectionUser};
+use super::{User, CollectionUser, Invitation};

 #[derive(Debug, Identifiable, Queryable, Insertable)]
 #[table_name = "organizations"]
@@ -27,7 +27,7 @@ pub struct UserOrganization {
 }

 pub enum UserOrgStatus {
-    _Invited = 0, // Unused, users are accepted automatically
+    Invited = 0,
     Accepted = 1,
     Confirmed = 2,
 }
@@ -51,6 +51,8 @@ impl UserOrgType {
 /// Local methods
 impl Organization {
+    pub const VIRTUAL_ID: &'static str = "00000000-0000-0000-0000-000000000000";
+
     pub fn new(name: String, billing_email: String) -> Self {
         Self {
             uuid: Uuid::new_v4().to_string(),
@@ -60,13 +62,21 @@ impl Organization {
         }
     }

+    pub fn new_virtual() -> Self {
+        Self {
+            uuid: String::from(Organization::VIRTUAL_ID),
+            name: String::from("bitwarden_rs"),
+            billing_email: String::from("none@none.none")
+        }
+    }
+
     pub fn to_json(&self) -> JsonValue {
         json!({
             "Id": self.uuid,
             "Name": self.name,
             "Seats": 10,
             "MaxCollections": 10,
+            "MaxStorageGb": 10, // The value doesn't matter, we don't check server-side
             "Use2fa": true,
             "UseDirectory": false,
             "UseEvents": false,
@@ -83,7 +93,7 @@ impl Organization {
             "BillingEmail": self.billing_email,
             "Plan": "TeamsAnnually",
             "PlanType": 5, // TeamsAnnually plan
+            "UsersGetPremium": true,
             "Object": "organization",
         })
     }
@@ -103,6 +113,20 @@ impl UserOrganization {
             type_: UserOrgType::User as i32,
         }
     }
+
+    pub fn new_virtual(user_uuid: String, type_: UserOrgType, status: UserOrgStatus) -> Self {
+        Self {
+            uuid: user_uuid.clone(),
+            user_uuid,
+            org_uuid: String::from(Organization::VIRTUAL_ID),
+
+            access_all: true,
+            key: String::new(),
+            status: status as i32,
+            type_: type_ as i32,
+        }
+    }
 }
@@ -113,24 +137,28 @@ use db::schema::{organizations, users_organizations, users_collections, ciphers_
 /// Database methods
 impl Organization {
-    pub fn save(&mut self, conn: &DbConn) -> bool {
+    pub fn save(&mut self, conn: &DbConn) -> QueryResult<()> {
+        if self.uuid == Organization::VIRTUAL_ID {
+            return Err(diesel::result::Error::NotFound)
+        }
+
         UserOrganization::find_by_org(&self.uuid, conn)
             .iter()
             .for_each(|user_org| {
                 User::update_uuid_revision(&user_org.user_uuid, conn);
             });

-        match diesel::replace_into(organizations::table)
-            .values(&*self)
-            .execute(&**conn) {
-            Ok(1) => true, // One row inserted
-            _ => false,
-        }
+        diesel::replace_into(organizations::table)
+            .values(&*self).execute(&**conn).and(Ok(()))
     }

     pub fn delete(self, conn: &DbConn) -> QueryResult<()> {
         use super::{Cipher, Collection};

+        if self.uuid == Organization::VIRTUAL_ID {
+            return Err(diesel::result::Error::NotFound)
+        }
+
         Cipher::delete_all_by_organization(&self.uuid, &conn)?;
         Collection::delete_all_by_organization(&self.uuid, &conn)?;
         UserOrganization::delete_all_by_organization(&self.uuid, &conn)?;
@@ -143,6 +171,9 @@ impl Organization {
     }

     pub fn find_by_uuid(uuid: &str, conn: &DbConn) -> Option<Self> {
+        if uuid == Organization::VIRTUAL_ID {
+            return Some(Self::new_virtual())
+        };
+
         organizations::table
             .filter(organizations::uuid.eq(uuid))
             .first::<Self>(&**conn).ok()
@@ -158,6 +189,7 @@ impl UserOrganization {
             "Name": org.name,
             "Seats": 10,
             "MaxCollections": 10,
+            "UsersGetPremium": true,
             "Use2fa": true,
             "UseDirectory": false,
@@ -194,7 +226,7 @@ impl UserOrganization {
         })
     }

-    pub fn to_json_collection_user_details(&self, read_only: &bool, conn: &DbConn) -> JsonValue {
+    pub fn to_json_collection_user_details(&self, read_only: bool, conn: &DbConn) -> JsonValue {
         let user = User::find_by_uuid(&self.user_uuid, conn).unwrap();

         json!({
@@ -230,18 +262,20 @@ impl UserOrganization {
         })
     }

-    pub fn save(&mut self, conn: &DbConn) -> bool {
+    pub fn save(&mut self, conn: &DbConn) -> QueryResult<()> {
+        if self.org_uuid == Organization::VIRTUAL_ID {
+            return Err(diesel::result::Error::NotFound)
+        }
+
         User::update_uuid_revision(&self.user_uuid, conn);

-        match diesel::replace_into(users_organizations::table)
-            .values(&*self)
-            .execute(&**conn) {
-            Ok(1) => true, // One row inserted
-            _ => false,
-        }
+        diesel::replace_into(users_organizations::table)
+            .values(&*self).execute(&**conn).and(Ok(()))
     }

     pub fn delete(self, conn: &DbConn) -> QueryResult<()> {
+        if self.org_uuid == Organization::VIRTUAL_ID {
+            return Err(diesel::result::Error::NotFound)
+        }
+
         User::update_uuid_revision(&self.user_uuid, conn);

         CollectionUser::delete_all_by_user(&self.user_uuid, &conn)?;
@@ -260,6 +294,13 @@ impl UserOrganization {
         Ok(())
     }

+    pub fn delete_all_by_user(user_uuid: &str, conn: &DbConn) -> QueryResult<()> {
+        for user_org in Self::find_any_state_by_user(&user_uuid, &conn) {
+            user_org.delete(&conn)?;
+        }
+        Ok(())
+    }
+
     pub fn has_full_access(self) -> bool {
         self.access_all || self.type_ < UserOrgType::User as i32
     }
@@ -270,18 +311,51 @@ impl UserOrganization {
         .first::<Self>(&**conn).ok()
     }

+    pub fn find_by_uuid_and_org(uuid: &str, org_uuid: &str, conn: &DbConn) -> Option<Self> {
+        users_organizations::table
+            .filter(users_organizations::uuid.eq(uuid))
+            .filter(users_organizations::org_uuid.eq(org_uuid))
+            .first::<Self>(&**conn).ok()
+    }
+
     pub fn find_by_user(user_uuid: &str, conn: &DbConn) -> Vec<Self> {
         users_organizations::table
             .filter(users_organizations::user_uuid.eq(user_uuid))
             .filter(users_organizations::status.eq(UserOrgStatus::Confirmed as i32))
-            .load::<Self>(&**conn).unwrap_or(vec![])
+            .load::<Self>(&**conn).unwrap_or_default()
+    }
+
+    pub fn find_invited_by_user(user_uuid: &str, conn: &DbConn) -> Vec<Self> {
+        users_organizations::table
+            .filter(users_organizations::user_uuid.eq(user_uuid))
+            .filter(users_organizations::status.eq(UserOrgStatus::Invited as i32))
+            .load::<Self>(&**conn).unwrap_or_default()
+    }
+
+    pub fn find_any_state_by_user(user_uuid: &str, conn: &DbConn) -> Vec<Self> {
+        users_organizations::table
+            .filter(users_organizations::user_uuid.eq(user_uuid))
+            .load::<Self>(&**conn).unwrap_or_default()
     }

     pub fn find_by_org(org_uuid: &str, conn: &DbConn) -> Vec<Self> {
+        if org_uuid == Organization::VIRTUAL_ID {
+            User::get_all(&*conn).iter().map(|user| {
+                Self::new_virtual(
+                    user.uuid.clone(),
+                    UserOrgType::User,
+                    if Invitation::find_by_mail(&user.email, &conn).is_some() {
+                        UserOrgStatus::Invited
+                    } else {
+                        UserOrgStatus::Confirmed
+                    })
+            }).collect()
+        } else {
             users_organizations::table
                 .filter(users_organizations::org_uuid.eq(org_uuid))
                 .load::<Self>(&**conn).expect("Error loading user organizations")
+        }
     }

     pub fn find_by_org_and_type(org_uuid: &str, type_: i32, conn: &DbConn) -> Vec<Self> {
         users_organizations::table
@@ -316,6 +390,22 @@
             .select(users_organizations::all_columns)
             .load::<Self>(&**conn).expect("Error loading user organizations")
     }
+
+    pub fn find_by_collection_and_org(collection_uuid: &str, org_uuid: &str, conn: &DbConn) -> Vec<Self> {
+        users_organizations::table
+            .filter(users_organizations::org_uuid.eq(org_uuid))
+            .left_join(users_collections::table.on(
+                users_collections::user_uuid.eq(users_organizations::user_uuid)
+            ))
+            .filter(
+                users_organizations::access_all.eq(true).or( // AccessAll..
+                    users_collections::collection_uuid.eq(&collection_uuid) // ..or access to collection with cipher
+                )
+            )
+            .select(users_organizations::all_columns)
+            .load::<Self>(&**conn).expect("Error loading user organizations")
+    }
 }

View File

@@ -35,29 +35,31 @@ pub struct User {
     pub equivalent_domains: String,
     pub excluded_globals: String,
+
+    pub client_kdf_type: i32,
+    pub client_kdf_iter: i32,
 }

 /// Local methods
 impl User {
-    pub fn new(mail: String, key: String, password: String) -> Self {
+    pub const CLIENT_KDF_TYPE_DEFAULT: i32 = 0; // PBKDF2: 0
+    pub const CLIENT_KDF_ITER_DEFAULT: i32 = 5_000;
+
+    pub fn new(mail: String) -> Self {
         let now = Utc::now().naive_utc();
         let email = mail.to_lowercase();

-        let iterations = CONFIG.password_iterations;
-        let salt = crypto::get_random_64();
-        let password_hash = crypto::hash_password(password.as_bytes(), &salt, iterations as u32);
-
         Self {
             uuid: Uuid::new_v4().to_string(),
             created_at: now,
             updated_at: now,
             name: email.clone(),
             email,
-            key,
-            password_hash,
-            salt,
-            password_iterations: iterations,
+            key: String::new(),

+            password_hash: Vec::new(),
+            salt: crypto::get_random_64(),
+            password_iterations: CONFIG.password_iterations,
             security_stamp: Uuid::new_v4().to_string(),
@@ -70,6 +72,9 @@ impl User {
             equivalent_domains: "[]".to_string(),
             excluded_globals: "[]".to_string(),
+
+            client_kdf_type: Self::CLIENT_KDF_TYPE_DEFAULT,
+            client_kdf_iter: Self::CLIENT_KDF_ITER_DEFAULT,
         }
     }
@@ -98,23 +103,32 @@ impl User {
     pub fn reset_security_stamp(&mut self) {
         self.security_stamp = Uuid::new_v4().to_string();
     }
+
+    pub fn is_server_admin(&self) -> bool {
+        match CONFIG.server_admin_email {
+            Some(ref server_admin_email) => &self.email == server_admin_email,
+            None => false
+        }
+    }
 }

 use diesel;
 use diesel::prelude::*;
 use db::DbConn;
-use db::schema::users;
+use db::schema::{users, invitations};
+
+use super::{Cipher, Folder, Device, UserOrganization, UserOrgType};

 /// Database methods
 impl User {
     pub fn to_json(&self, conn: &DbConn) -> JsonValue {
-        use super::UserOrganization;
-        use super::TwoFactor;
+        use super::{UserOrganization, UserOrgType, UserOrgStatus, TwoFactor};

-        let orgs = UserOrganization::find_by_user(&self.uuid, conn);
+        let mut orgs = UserOrganization::find_by_user(&self.uuid, conn);
+        if self.is_server_admin() {
+            orgs.push(UserOrganization::new_virtual(self.uuid.clone(), UserOrgType::Owner, UserOrgStatus::Confirmed));
+        }
         let orgs_json: Vec<JsonValue> = orgs.iter().map(|c| c.to_json(&conn)).collect();
-        let twofactor_enabled = TwoFactor::find_by_user(&self.uuid, conn).len() > 0;
+        let twofactor_enabled = !TwoFactor::find_by_user(&self.uuid, conn).is_empty();

         json!({
             "Id": self.uuid,
@@ -134,24 +148,34 @@ impl User {
     }

-    pub fn save(&mut self, conn: &DbConn) -> bool {
+    pub fn save(&mut self, conn: &DbConn) -> QueryResult<()> {
         self.updated_at = Utc::now().naive_utc();

-        match diesel::replace_into(users::table) // Insert or update
-            .values(&*self)
-            .execute(&**conn) {
-            Ok(1) => true, // One row inserted
-            _ => false,
-        }
+        diesel::replace_into(users::table) // Insert or update
+            .values(&*self).execute(&**conn).and(Ok(()))
     }

-    pub fn delete(self, conn: &DbConn) -> bool {
-        match diesel::delete(users::table.filter(
+    pub fn delete(self, conn: &DbConn) -> QueryResult<()> {
+        for user_org in UserOrganization::find_by_user(&self.uuid, &*conn) {
+            if user_org.type_ == UserOrgType::Owner as i32 {
+                if UserOrganization::find_by_org_and_type(
+                    &user_org.org_uuid,
+                    UserOrgType::Owner as i32, &conn
+                ).len() <= 1 {
+                    return Err(diesel::result::Error::NotFound);
+                }
+            }
+        }
+
+        UserOrganization::delete_all_by_user(&self.uuid, &*conn)?;
+        Cipher::delete_all_by_user(&self.uuid, &*conn)?;
+        Folder::delete_all_by_user(&self.uuid, &*conn)?;
+        Device::delete_all_by_user(&self.uuid, &*conn)?;
+        Invitation::take(&self.email, &*conn); // Delete invitation if any
+
+        diesel::delete(users::table.filter(
             users::uuid.eq(self.uuid)))
-            .execute(&**conn) {
-            Ok(1) => true, // One row deleted
-            _ => false,
-        }
+            .execute(&**conn).and(Ok(()))
     }

     pub fn update_uuid_revision(uuid: &str, conn: &DbConn) {
@@ -185,4 +209,53 @@
         .filter(users::uuid.eq(uuid))
         .first::<Self>(&**conn).ok()
     }
+
+    pub fn get_all(conn: &DbConn) -> Vec<Self> {
+        users::table
+            .load::<Self>(&**conn).expect("Error loading users")
+    }
+}
#[derive(Debug, Identifiable, Queryable, Insertable)]
#[table_name = "invitations"]
#[primary_key(email)]
pub struct Invitation {
pub email: String,
}
impl Invitation {
pub fn new(email: String) -> Self {
Self {
email
}
}
pub fn save(&mut self, conn: &DbConn) -> QueryResult<()> {
diesel::replace_into(invitations::table)
.values(&*self)
.execute(&**conn)
.and(Ok(()))
}
pub fn delete(self, conn: &DbConn) -> QueryResult<()> {
diesel::delete(invitations::table.filter(
invitations::email.eq(self.email)))
.execute(&**conn)
.and(Ok(()))
}
pub fn find_by_mail(mail: &str, conn: &DbConn) -> Option<Self> {
let lower_mail = mail.to_lowercase();
invitations::table
.filter(invitations::email.eq(lower_mail))
.first::<Self>(&**conn).ok()
}
pub fn take(mail: &str, conn: &DbConn) -> bool {
CONFIG.invitations_allowed &&
match Self::find_by_mail(mail, &conn) {
Some(invitation) => invitation.delete(&conn).is_ok(),
None => false
}
}
}

View File

@@ -21,6 +21,7 @@ table! {
         fields -> Nullable<Text>,
         data -> Text,
         favorite -> Bool,
+        password_history -> Nullable<Text>,
     }
 }
@@ -71,6 +72,12 @@ table! {
     }
 }
 
+table! {
+    invitations (email) {
+        email -> Text,
+    }
+}
+
 table! {
     organizations (uuid) {
         uuid -> Text,
@@ -109,6 +116,8 @@ table! {
         security_stamp -> Text,
         equivalent_domains -> Text,
         excluded_globals -> Text,
+        client_kdf_type -> Integer,
+        client_kdf_iter -> Integer,
     }
 }
@@ -157,6 +166,7 @@ allow_tables_to_appear_in_same_query!(
     devices,
     folders,
     folders_ciphers,
+    invitations,
     organizations,
     twofactor,
     users,
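Note: the two new users columns store each account's client-side KDF settings (Bitwarden clients use KDF type 0 for PBKDF2-SHA256), and the new invitations table backs the Invitation model shown earlier. As a quick illustration only, the generated Diesel DSL lets the table be queried like this (the conn variable is assumed to be a DbConn):

    use diesel::prelude::*;

    // List pending invitation addresses; illustrative, not part of the diff.
    let pending: Vec<String> = invitations::table
        .select(invitations::email)
        .load::<String>(&**conn)
        .unwrap_or_default();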

src/mail.rs Normal file (62 additions)
View File

@@ -0,0 +1,62 @@
use native_tls::{Protocol, TlsConnector};
use lettre::{Transport, SmtpTransport, SmtpClient, ClientTlsParameters, ClientSecurity};
use lettre::smtp::ConnectionReuseParameters;
use lettre::smtp::authentication::Credentials;
use lettre_email::EmailBuilder;

use MailConfig;

fn mailer(config: &MailConfig) -> SmtpTransport {
    let client_security = if config.smtp_ssl {
        let tls = TlsConnector::builder()
            .min_protocol_version(Some(Protocol::Tlsv11))
            .build()
            .unwrap();

        ClientSecurity::Required(ClientTlsParameters::new(config.smtp_host.clone(), tls))
    } else {
        ClientSecurity::None
    };

    let smtp_client = SmtpClient::new(
        (config.smtp_host.as_str(), config.smtp_port),
        client_security,
    ).unwrap();

    let smtp_client = match (&config.smtp_username, &config.smtp_password) {
        (Some(user), Some(pass)) => smtp_client.credentials(Credentials::new(user.clone(), pass.clone())),
        _ => smtp_client,
    };

    smtp_client
        .smtp_utf8(true)
        .connection_reuse(ConnectionReuseParameters::NoReuse)
        .transport()
}

pub fn send_password_hint(address: &str, hint: Option<String>, config: &MailConfig) -> Result<(), String> {
    let (subject, body) = if let Some(hint) = hint {
        ("Your master password hint",
         format!(
            "You (or someone) recently requested your master password hint.\n\n\
             Your hint is: \"{}\"\n\n\
             If you did not request your master password hint you can safely ignore this email.\n",
            hint))
    } else {
        ("Sorry, you have no password hint...",
         "Sorry, you have not specified any password hint...\n".into())
    };

    let email = EmailBuilder::new()
        .to(address)
        .from((config.smtp_from.clone(), "Bitwarden-rs"))
        .subject(subject)
        .body(body)
        .build()
        .map_err(|e| e.to_string())?;

    mailer(config)
        .send(email.into())
        .map_err(|e| e.to_string())
        .and(Ok(()))
}

View File

@@ -1,10 +1,14 @@
-#![feature(plugin, custom_derive)]
+#![feature(plugin, custom_derive, vec_remove_item, try_trait)]
 #![plugin(rocket_codegen)]
+#![recursion_limit="128"]
 #![allow(proc_macro_derive_resolution_fallback)] // TODO: Remove this when diesel update fixes warnings
 
 extern crate rocket;
 extern crate rocket_contrib;
 extern crate reqwest;
 extern crate multipart;
+extern crate ws;
+extern crate rmpv;
+extern crate chashmap;
 extern crate serde;
 #[macro_use]
 extern crate serde_derive;
@@ -27,8 +31,12 @@ extern crate lazy_static;
 #[macro_use]
 extern crate num_derive;
 extern crate num_traits;
+extern crate lettre;
+extern crate lettre_email;
+extern crate native_tls;
+extern crate byteorder;
 
-use std::{env, path::Path, process::{exit, Command}};
+use std::{path::Path, process::{exit, Command}};
 
 use rocket::Rocket;
 
 #[macro_use]
@@ -38,6 +46,7 @@ mod api;
 mod db;
 mod crypto;
 mod auth;
+mod mail;
 
 fn init_rocket() -> Rocket {
     rocket::ignite()
@@ -45,7 +54,9 @@ fn init_rocket() -> Rocket {
         .mount("/api", api::core_routes())
         .mount("/identity", api::identity_routes())
         .mount("/icons", api::icons_routes())
+        .mount("/notifications", api::notifications_routes())
         .manage(db::init_pool())
+        .manage(api::start_notification_server())
 }
 
 // Embed the migrations from the migrations folder into the application
@@ -70,7 +81,6 @@ fn main() {
     check_web_vault();
     migrations::run_migrations();
 
     init_rocket().launch();
 }
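Note: both .manage(...) calls register a value as Rocket managed state, so any route can later borrow it through the State request guard. A rough sketch under stated assumptions (the WsUsers type name is hypothetical, not taken from this diff):

    use rocket::State;

    // Illustrative only: borrows the handle registered via .manage().
    fn notify_example(notifier: State<WsUsers>) {
        // e.g. push a cipher-update notification to a user's connected clients
        let _ = &*notifier;
    }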
@@ -154,6 +164,61 @@ lazy_static! {
     static ref CONFIG: Config = Config::load();
 }
 
+#[derive(Debug)]
+pub struct MailConfig {
+    smtp_host: String,
+    smtp_port: u16,
+    smtp_ssl: bool,
+    smtp_from: String,
+    smtp_username: Option<String>,
+    smtp_password: Option<String>,
+}
+
+impl MailConfig {
+    fn load() -> Option<Self> {
+        use util::{get_env, get_env_or};
+
+        // When SMTP_HOST is absent, we assume the user does not want to enable it.
+        let smtp_host = match get_env("SMTP_HOST") {
+            Some(host) => host,
+            None => return None,
+        };
+
+        let smtp_from = get_env("SMTP_FROM").unwrap_or_else(|| {
+            println!("Please specify SMTP_FROM to enable SMTP support.");
+            exit(1);
+        });
+
+        let smtp_ssl = get_env_or("SMTP_SSL", true);
+        let smtp_port = get_env("SMTP_PORT").unwrap_or_else(||
+            if smtp_ssl {
+                587u16
+            } else {
+                25u16
+            }
+        );
+
+        let smtp_username = get_env("SMTP_USERNAME");
+        let smtp_password = get_env("SMTP_PASSWORD").or_else(|| {
+            if smtp_username.as_ref().is_some() {
+                println!("SMTP_PASSWORD is mandatory when specifying SMTP_USERNAME.");
+                exit(1);
+            } else {
+                None
+            }
+        });
+
+        Some(MailConfig {
+            smtp_host,
+            smtp_port,
+            smtp_ssl,
+            smtp_from,
+            smtp_username,
+            smtp_password,
+        })
+    }
+}
+
 #[derive(Debug)]
 pub struct Config {
     database_url: String,
@@ -167,42 +232,58 @@ pub struct Config {
     web_vault_folder: String,
     web_vault_enabled: bool,
 
+    websocket_enabled: bool,
+    websocket_url: String,
+
     local_icon_extractor: bool,
     signups_allowed: bool,
+    invitations_allowed: bool,
+    server_admin_email: Option<String>,
     password_iterations: i32,
     show_password_hint: bool,
 
     domain: String,
     domain_set: bool,
+
+    mail: Option<MailConfig>,
 }
 
 impl Config {
     fn load() -> Self {
+        use util::{get_env, get_env_or};
         dotenv::dotenv().ok();
 
-        let df = env::var("DATA_FOLDER").unwrap_or("data".into());
-        let key = env::var("RSA_KEY_FILENAME").unwrap_or(format!("{}/{}", &df, "rsa_key"));
-        let domain = env::var("DOMAIN");
+        let df = get_env_or("DATA_FOLDER", "data".to_string());
+        let key = get_env_or("RSA_KEY_FILENAME", format!("{}/{}", &df, "rsa_key"));
+        let domain = get_env("DOMAIN");
 
         Config {
-            database_url: env::var("DATABASE_URL").unwrap_or(format!("{}/{}", &df, "db.sqlite3")),
-            icon_cache_folder: env::var("ICON_CACHE_FOLDER").unwrap_or(format!("{}/{}", &df, "icon_cache")),
-            attachments_folder: env::var("ATTACHMENTS_FOLDER").unwrap_or(format!("{}/{}", &df, "attachments")),
+            database_url: get_env_or("DATABASE_URL", format!("{}/{}", &df, "db.sqlite3")),
+            icon_cache_folder: get_env_or("ICON_CACHE_FOLDER", format!("{}/{}", &df, "icon_cache")),
+            attachments_folder: get_env_or("ATTACHMENTS_FOLDER", format!("{}/{}", &df, "attachments")),
 
             private_rsa_key: format!("{}.der", &key),
             private_rsa_key_pem: format!("{}.pem", &key),
             public_rsa_key: format!("{}.pub.der", &key),
 
-            web_vault_folder: env::var("WEB_VAULT_FOLDER").unwrap_or("web-vault/".into()),
-            web_vault_enabled: util::parse_option_string(env::var("WEB_VAULT_ENABLED").ok()).unwrap_or(true),
-            local_icon_extractor: util::parse_option_string(env::var("LOCAL_ICON_EXTRACTOR").ok()).unwrap_or(false),
-            signups_allowed: util::parse_option_string(env::var("SIGNUPS_ALLOWED").ok()).unwrap_or(true),
-            password_iterations: util::parse_option_string(env::var("PASSWORD_ITERATIONS").ok()).unwrap_or(100_000),
-            show_password_hint: util::parse_option_string(env::var("SHOW_PASSWORD_HINT").ok()).unwrap_or(true),
-            domain_set: domain.is_ok(),
+            web_vault_folder: get_env_or("WEB_VAULT_FOLDER", "web-vault/".into()),
+            web_vault_enabled: get_env_or("WEB_VAULT_ENABLED", true),
+
+            websocket_enabled: get_env_or("WEBSOCKET_ENABLED", false),
+            websocket_url: format!("{}:{}", get_env_or("WEBSOCKET_ADDRESS", "0.0.0.0".to_string()), get_env_or("WEBSOCKET_PORT", 3012)),
+
+            local_icon_extractor: get_env_or("LOCAL_ICON_EXTRACTOR", false),
+            signups_allowed: get_env_or("SIGNUPS_ALLOWED", true),
+            server_admin_email: get_env("SERVER_ADMIN_EMAIL"),
+            invitations_allowed: get_env_or("INVITATIONS_ALLOWED", true),
+            password_iterations: get_env_or("PASSWORD_ITERATIONS", 100_000),
+            show_password_hint: get_env_or("SHOW_PASSWORD_HINT", true),
+
+            domain_set: domain.is_some(),
             domain: domain.unwrap_or("http://localhost".into()),
+
+            mail: MailConfig::load(),
         }
     }
 }
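Note: every value now flows through get_env/get_env_or (defined in util.rs below), so a variable that is missing or fails to parse falls back to its default instead of panicking. A self-contained sketch of the behavior; the variable values are examples, not project defaults:

    use std::env;

    fn demo() {
        env::set_var("SIGNUPS_ALLOWED", "false");    // parsed as bool
        env::set_var("PASSWORD_ITERATIONS", "oops"); // unparsable -> default 100_000
        env::set_var("SMTP_HOST", "smtp.example.com");
        env::set_var("SMTP_FROM", "bitwarden@example.com");

        let config = Config::load();
        assert_eq!(config.signups_allowed, false);
        assert_eq!(config.password_iterations, 100_000);
        assert!(config.mail.is_some()); // SMTP enabled because SMTP_HOST is set
    }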

View File

@@ -6,17 +6,18 @@ macro_rules! err {
($err:expr, $msg:expr) => {{ ($err:expr, $msg:expr) => {{
println!("ERROR: {}", $msg); println!("ERROR: {}", $msg);
err_json!(json!({ err_json!(json!({
"Message": $err, "error": $err,
"ValidationErrors": { "error_description": $err,
"": [$msg,], "ErrorModel": {
}, "Message": $msg,
"ValidationErrors": null,
"ExceptionMessage": null, "ExceptionMessage": null,
"ExceptionStackTrace": null, "ExceptionStackTrace": null,
"InnerExceptionMessage": null, "InnerExceptionMessage": null,
"Object": "error", "Object": "error"
})) }}))
}}; }};
($msg:expr) => { err!("The model state is invalid", $msg) } ($msg:expr) => { err!("unknown_error", $msg) }
} }
#[macro_export] #[macro_export]
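Note: to make the new shape concrete, a one-argument invocation now nests the human-readable message inside ErrorModel and fills OAuth-style error fields, which is closer to what the official clients parse. Illustrative invocation and (in the comment) roughly the body it produces:

    // err!("Username or password is incorrect. Try again.") yields approximately:
    // {
    //   "error": "unknown_error",
    //   "error_description": "unknown_error",
    //   "ErrorModel": {
    //     "Message": "Username or password is incorrect. Try again.",
    //     "ValidationErrors": null,
    //     "Object": "error"
    //   }
    // }
    err!("Username or password is incorrect. Try again.")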
@@ -97,6 +98,7 @@ pub fn get_display_size(size: i32) -> String {
 ///
 
 use std::str::FromStr;
+use std::ops::Try;
 
 pub fn upcase_first(s: &str) -> String {
     let mut c = s.chars();
@@ -106,14 +108,37 @@ pub fn upcase_first(s: &str) -> String {
     }
 }
 
-pub fn parse_option_string<S, T>(string: Option<S>) -> Option<T> where S: AsRef<str>, T: FromStr {
-    if let Some(Ok(value)) = string.map(|s| s.as_ref().parse::<T>()) {
+pub fn try_parse_string<S, T, U>(string: impl Try<Ok = S, Error = U>) -> Option<T> where S: AsRef<str>, T: FromStr {
+    if let Ok(Ok(value)) = string.into_result().map(|s| s.as_ref().parse::<T>()) {
         Some(value)
     } else {
         None
     }
 }
 
+pub fn try_parse_string_or<S, T, U>(string: impl Try<Ok = S, Error = U>, default: T) -> T where S: AsRef<str>, T: FromStr {
+    if let Ok(Ok(value)) = string.into_result().map(|s| s.as_ref().parse::<T>()) {
+        value
+    } else {
+        default
+    }
+}
+
+///
+/// Env methods
+///
+
+use std::env;
+
+pub fn get_env<V>(key: &str) -> Option<V> where V: FromStr {
+    try_parse_string(env::var(key))
+}
+
+pub fn get_env_or<V>(key: &str, default: V) -> V where V: FromStr {
+    try_parse_string_or(env::var(key), default)
+}
+
 ///
 /// Date util methods
 ///
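Note: a short sketch of the new helpers in use; the keys match those read by Config::load above, the values are illustrative. The target type is inferred from the default (or a turbofish), and anything missing or unparsable degrades gracefully:

    // u16 here; bool, i32, or String at other call sites.
    let port: u16 = get_env_or("SMTP_PORT", 25u16);
    // Missing or unparsable values become None instead of panicking.
    let admin: Option<String> = get_env("SERVER_ADMIN_EMAIL");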