Compare commits

...

246 Commits
0.9.0 ... 1.2.0

Author SHA1 Message Date
Daniel García
2bb6482bec Merge branch 'openssl-fix' 2018-09-20 22:52:58 +02:00
Daniel García
c169095128 Update dependencies to point to upstream lettre 2018-09-20 22:45:19 +02:00
Daniel García
b1397c95ca Remove unnecessary path in PROXY.md 2018-09-19 22:33:12 +02:00
Daniel García
3df31e3464 Temp fix for OpenSSL 1.1.1 compatibility 2018-09-19 21:45:50 +02:00
Daniel García
638a0fd3c3 Updated dependencies 2018-09-19 21:43:03 +02:00
Daniel García
ebb66c374e Implement KDF iterations change (Fixes #195) 2018-09-19 17:30:14 +02:00
Daniel García
89e3c41043 Merge pull request #191 from mprasil/vault_2.3.0
Update Vault to v2.3.0
2018-09-18 16:03:10 +02:00
Miroslav Prasil
3da410ef71 Update Vault to v2.3.0 2018-09-18 13:53:25 +01:00
Daniel García
2dccbd3412 Merge pull request #190 from mprasil/invite_readme
Update the Invitation workflow documentation
2018-09-18 14:36:39 +02:00
Daniel García
2ff529ed99 Merge pull request #189 from mprasil/delete_fix
Add alias for DELETE call on accounts
2018-09-18 14:36:31 +02:00
Miroslav Prasil
4fae1e4298 Update the Invitation workflow documentation 2018-09-18 11:49:20 +01:00
Miroslav Prasil
f7951b44ba Add alias for DELETE call on accounts 2018-09-18 11:13:45 +01:00
Daniel García
ff8eeff995 Merge pull request #184 from mprasil/code_block_fix
Fixed code block, added some formatting
2018-09-16 15:44:05 +02:00
Miroslav Prasil
00019dc356 Added some formatting and a link 2018-09-16 12:36:08 +01:00
Miroslav Prasil
404fe5321e Fixed code block 2018-09-16 12:27:29 +01:00
Daniel García
e7dd239d20 Merge pull request #182 from dobunzli/master
Update README.md
2018-09-15 20:03:58 +02:00
Daniel García
071f3370e3 Merge pull request #183 from jkaberg/traefik_example_proxy
traefik proxy example
2018-09-15 20:03:34 +02:00
Joel Kåberg
ee321be579 traefik example 2018-09-14 23:22:38 +02:00
dobunzli
eb61425da5 Update README.md
Added info about enabling https when software obtaining certs uses symlinks
2018-09-14 22:39:58 +02:00
Daniel García
b75ba216d1 Return default prelogin values when the user doesn't exist 2018-09-13 23:04:52 +02:00
Daniel García
8651df8c2a Fixed some lint issues 2018-09-13 21:55:23 +02:00
Daniel García
948554a20f Added config option for websocket port, and reworked the config parsing a bit.
Added SMTP_FROM config to examples and made it mandatory; it doesn't make much sense to not specify the from address.
2018-09-13 20:59:51 +02:00
Daniel García
9cdb605659 Include more proxy examples 2018-09-13 17:08:16 +02:00
Daniel García
928e2424c0 Updated dependencies and fixed errors 2018-09-13 16:05:13 +02:00
Daniel García
a01fee0b9f Merge branch 'ws'
# Conflicts:
#	Cargo.toml
#	src/api/core/ciphers.rs
#	src/main.rs
2018-09-13 15:59:45 +02:00
Daniel García
924e4a17e5 Merge pull request #175 from stammw/master
Documentation for SMTP and password hint configuration
2018-09-13 15:46:52 +02:00
Daniel García
fdbd73c716 Merge branch 'master' into master 2018-09-13 15:39:28 +02:00
Daniel García
f397f0cbd0 Implement organization import for admins and owners (Fixes #178) 2018-09-13 15:16:24 +02:00
Daniel García
4d2c6e39b2 Merge pull request #177 from mprasil/raspberry
Add Dockerfile for Raspberry Pi
2018-09-13 00:19:15 +02:00
Daniel García
3e1afb139c Remove unnecessary return 2018-09-12 23:58:02 +02:00
Jean-Christophe BEGUE
af69606bea Documentation for SMTP and password hint configuration 2018-09-12 21:19:29 +02:00
Miroslav Prasil
bc8ff14695 Fix the binary path 2018-09-12 13:51:43 +01:00
Miroslav Prasil
5f7b220eb4 Initial shot as cross compilation 2018-09-12 12:15:26 +01:00
Daniel García
67adfee5e5 Some documentation 2018-09-11 17:27:04 +02:00
Daniel García
d66d4fd87f Add error message when the proxy doesn't route websockets correctly 2018-09-11 17:09:33 +02:00
Daniel García
1b20a25514 Merge pull request #173 from mprasil/poormans_invites
Implement poor man's invitation via Organization invitation
2018-09-11 16:48:56 +02:00
Miroslav Prasil
c1cd4d9a6b Modify User::new to be keyless and passwordless 2018-09-11 14:25:12 +01:00
Daniel García
b63693aefb Merge pull request #137 from stammw/master
SMTP implementation, along with password HINT email
2018-09-11 14:58:09 +02:00
Miroslav Prasil
ec05f14f5a Implement poor man's invitation via Organization invitation 2018-09-11 13:09:59 +01:00
Jean-Christophe BEGUE
37d88be2be Return an error when the email address for the password hint is not valid 2018-09-11 13:12:24 +02:00
Jean-Christophe BEGUE
1c641d7635 Special messages when user has no password hint 2018-09-11 13:04:34 +02:00
Jean-Christophe BEGUE
e2ab2f7306 Save None instead of empty password hint 2018-09-11 13:00:59 +02:00
Daniel García
434551e012 Merge pull request #171 from shauder/ws
Expose 3012 in docker build file for notifications
2018-09-04 21:18:16 +02:00
Daniel García
69dcbdd3b2 Merge branch 'master' into ws 2018-09-04 17:46:38 +02:00
Daniel García
8df6f79f19 Merge pull request #170 from mprasil/org-user-edit
Fix editing users in Organization
2018-09-04 17:32:16 +02:00
Shane A. Faulkner
422f7ccfa8 Expose 3012 in docker build file for notifications 2018-09-04 10:22:17 -05:00
Miroslav Prasil
c58682e3fb Fix the logic in user editing 2018-09-04 16:10:26 +01:00
Miroslav Prasil
db111ae2a0 Check properly the user membership in Organization 2018-09-04 13:37:44 +01:00
Miroslav Prasil
049aa33f17 Fix editing users in Organization 2018-09-04 12:15:46 +01:00
Daniel García
b1ac37609f Merge pull request #169 from mprasil/http_warning
Add info on running over HTTP (documentation for #153)
2018-09-03 13:47:05 +02:00
Miroslav Prasil
53e8f78af6 Link to the https setup 2018-09-03 10:59:59 +01:00
Miroslav Prasil
1bced97e04 Add info on running over HTTP (documentation for #153) 2018-09-03 10:53:52 +01:00
Daniel García
f8ae5013cb Merge pull request #167 from shauder/ws
Add support for cipher update notifications
2018-09-02 00:17:40 +02:00
Shane A. Faulkner
d8e5e53273 Add notifications for cipher delete and create 2018-09-01 10:59:13 -05:00
Shane A. Faulkner
b6502e9e9d Add support for CipherUpdate notifications 2018-08-31 23:30:53 -05:00
Daniel García
d70864ac73 Initial version of websockets notification support.
For now only folder notifications are sent (create, rename, delete).
The notifications are only tested between two web-vault sessions in different browsers; mobile apps and browser extensions are untested.

The websocket server is exposed on port 3012, while the rocket server is exposed on another port (8000 by default). To make notifications work, both should be accessible on the same port, which requires a reverse proxy.

My testing is done with Caddy server, and the following config:

```
localhost {

    # The negotiation endpoint is also proxied to Rocket
    proxy /notifications/hub/negotiate 0.0.0.0:8000 {
        transparent
    }

    # Notifications redirected to the websockets server
    proxy /notifications/hub 0.0.0.0:3012 {
        websocket
    }

    # Proxy the Root directory to Rocket
    proxy / 0.0.0.0:8000 {
        transparent
    }
}
```

This exposes the service on port 2015.
2018-08-30 17:58:53 +02:00
Daniel García
f94e626021 Merge pull request #166 from mprasil/alpine
Alpine
2018-08-30 16:47:58 +02:00
Daniel García
0a3b84b815 Merge pull request #165 from mprasil/shared_edit_fix
Fix editing shared cipher (fixes #164)
2018-08-30 16:47:08 +02:00
Miroslav Prasil
d336d89b83 Fix editing shared cipher (fixes #164) 2018-08-30 11:12:29 +01:00
Miroslav Prasil
1a5c1979e3 Move Alpine Dockerfile to separate file 2018-08-30 10:38:38 +01:00
Miroslav Prasil
cec9566d2a Merge branch 'master' into alpine 2018-08-29 15:06:50 +01:00
Baelyk
fe473b9e75 Attachment::save() returns Result instead of bool (#161)
Returning a result instead of a bool as per #6
2018-08-29 15:22:19 +02:00
mprasil
062ae4dd59 Allow non-Admin user to share to collection (fixes #157) (#159)
* Allow non-Admin user to share to collection (fixes #157)

* Better handling of collection sharing
2018-08-29 15:22:03 +02:00
Miroslav Prasil
45d676eb10 Merge branch 'master' into alpine 2018-08-29 10:07:09 +01:00
mprasil
3cfdf9b585 Add DELETE handlers for cipher and attachment deletion (fixes #158) (#160) 2018-08-29 00:48:53 +02:00
Miroslav Prasil
08b551624c Merge branch 'master' into alpine 2018-08-28 14:06:54 +01:00
Daniel García
761a0a3393 Removed accidental change to Dockerfile 2018-08-28 12:54:57 +02:00
Daniel García
6660b0aef3 Updated web vault to version 2.2 2018-08-28 03:22:13 +02:00
Kumar Ankur
781056152a Support password history #155 (#156)
* Password History Support (#155)

* down.sql logic not required as per review comments
2018-08-27 23:08:58 +02:00
Miroslav Prasil
6822bb28a0 Merge branch 'master' into alpine 2018-08-26 16:58:46 +01:00
Daniel García
b82710eecf Merge pull request #152 from Baelyk/master
Add ip and username to failed login attempts
2018-08-26 17:43:50 +02:00
Baelyk
c386b3bcf7 Add IP and Username to failed login attempts
Resolves #119
2018-08-25 17:07:59 -05:00
Miroslav Prasil
ffec0b065b Updated build image version 2018-08-25 09:29:50 +01:00
Miroslav Prasil
5b7fe9f155 Merge branch 'master' into alpine 2018-08-24 23:17:52 +01:00
Daniel García
8d1ee859f2 Implemented basic support for prelogin and notification negotiation 2018-08-24 19:02:34 +02:00
Daniel García
c91f80c456 Fixed rust toolchain date 2018-08-24 17:12:04 +02:00
Daniel García
39891e86a0 Updated dependencies, added Travis CI integration and some badges 2018-08-24 17:07:11 +02:00
Miroslav Prasil
575f701390 Merge branch 'master' into alpine 2018-08-23 21:59:23 +01:00
Daniel García
335099cd30 Merge pull request #150 from mprasil/build_instructions
Update the build instruction for new Vault
2018-08-23 16:05:24 +02:00
Miroslav Prasil
9fad541c87 Clone repository instead of downloading as suggested by @mqus 2018-08-23 12:08:54 +01:00
Miroslav Prasil
007e053e2f Update the build instruction for new Vault 2018-08-23 11:06:32 +01:00
Miroslav Prasil
ef2413a5aa Fix SSL issue, rm cache 2018-08-21 22:08:16 +01:00
Miroslav Prasil
ca8e1c646d Update build image 2018-08-21 22:08:16 +01:00
Miroslav Prasil
346c7630c9 Initial implementation of musl build on top of Alpine 2018-08-21 22:08:16 +01:00
Daniel García
1c57c9d8e0 Merge pull request #148 from mprasil/beta
Merge Beta to master
2018-08-21 22:41:50 +02:00
Daniel García
bd20d8724b Merge pull request #147 from mprasil/master
Bump version to 0.13.0 - latest Vault v1
2018-08-21 22:32:54 +02:00
Miroslav Prasil
69a18255c6 Bump up version to 1.0.0 2018-08-21 21:21:54 +01:00
Miroslav Prasil
c40baf5e17 Merge branch 'master' into beta 2018-08-21 21:17:12 +01:00
Miroslav Prasil
df041108f6 Bump version to 0.13.0 - latest Vault v1 2018-08-21 21:13:56 +01:00
Daniel García
ee10d278a7 Merge pull request #146 from mprasil/cipher_folder_revision
Update affected users revision on cipher and folder change
2018-08-21 21:44:33 +02:00
Miroslav Prasil
2b2401be19 Update affected users revision on cipher and folder change 2018-08-21 17:32:00 +01:00
Daniel García
4f58d07c83 Merge pull request #145 from mprasil/org_user_revision
Organization update improvements
2018-08-21 16:27:19 +02:00
Miroslav Prasil
9eea0151ba Update user revision timestamp on Organization changes 2018-08-21 13:26:22 +01:00
Miroslav Prasil
40d09ddd2a Add PUT alias for Organization updates 2018-08-21 13:25:52 +01:00
Daniel García
d332e87655 Merge pull request #144 from mprasil/collection_revision
Update affected users revision when there are collection changes
2018-08-21 13:47:19 +02:00
Daniel García
0fa48a749f Merge pull request #143 from mprasil/update_revision_fix
Actually update the revision date for user struct, not just in DB
2018-08-21 13:46:05 +02:00
Miroslav Prasil
a5ef8aef0f Update affected users revision when there are collection changes 2018-08-21 12:20:55 +01:00
Miroslav Prasil
4fb09c5b4d Actually update the revision date for user struct, not just in DB 2018-08-21 10:36:04 +01:00
Jean-Christophe BEGUE
9e63985b28 Check email validity before using it for password hint sending 2018-08-16 21:25:28 +02:00
Daniel García
6fdeeb56ce Merge pull request #140 from mprasil/error_format
Update the error format to show message in new Vault
2018-08-16 00:52:46 +02:00
Daniel García
b002d34cd4 Merge pull request #139 from mprasil/edit_shared_fix
Add PUT alias for editing cipher
2018-08-15 23:02:59 +02:00
Daniel García
e46fc62b78 Merge pull request #141 from mprasil/profile_update
Add PUT alias for profile update
2018-08-15 23:02:34 +02:00
Jean-Christophe BEGUE
401aa7c699 Make SMTP authentication optional; let lettre pick the best auth mechanism 2018-08-15 17:21:19 +02:00
Miroslav Prasil
12a2dc0901 Add PUT alias for profile update 2018-08-15 16:10:40 +01:00
Miroslav Prasil
b3f3fd81ac Update the error format to show message in new Vault 2018-08-15 15:50:07 +01:00
Miroslav Prasil
f2fec345ec Add PUT alias for editing cipher 2018-08-15 14:27:37 +01:00
Daniel García
b6312340b6 Merge pull request #138 from mprasil/readme_updates
Cleaned up HTTPS example
2018-08-15 15:12:01 +02:00
Daniel García
3d1fc0f2e8 Merge pull request #136 from mprasil/disable_analytics
Remove analytics from Vault
2018-08-15 15:11:45 +02:00
Jean-Christophe BEGUE
d68f57cbba Fix password hint showing logic 2018-08-15 14:08:00 +02:00
Miroslav Prasil
80bad9f66d Cleaned up HTTPS example 2018-08-15 11:18:34 +01:00
Jean-Christophe BEGUE
19e0605d30 Better message into the password hint email 2018-08-15 10:17:05 +02:00
Jean-Christophe BEGUE
812387e586 SMTP integration, send password hint by email. 2018-08-15 08:45:18 +02:00
Miroslav Prasil
5ecafb157d Disable analytics via patch to Vault 2018-08-14 21:48:56 +01:00
Daniel García
f1ade62638 Merge pull request #135 from mprasil/empty_collection
Deserialize "null" to empty Vec for Collections
2018-08-14 16:43:03 +02:00
Miroslav Prasil
00b882935f Deserialize "null" to empty Vec for Collections 2018-08-14 11:06:42 +01:00
Daniel García
eb5641b863 Merge pull request #134 from mprasil/put_collections_admin
Add aliases for PUTs and DELETEs on collections, organizations and org users
2018-08-13 20:16:34 +02:00
Miroslav Prasil
0dfd9c7670 Add couple more aliases for PUTs and DELETEs 2018-08-13 16:45:30 +01:00
Miroslav Prasil
6ede1743ac add alias for PUT collections-admin 2018-08-13 16:00:10 +01:00
Daniel García
d3f357b708 Implemented PUT for u2f registration 2018-08-13 15:26:01 +02:00
Daniel García
5a55dd1d4b Merge pull request #133 from mprasil/document_differences
Extend documentation
2018-08-13 14:38:54 +02:00
Daniel García
16056626b0 Merge pull request #131 from mprasil/revision_date
Implement update_revision trigger
2018-08-13 14:38:30 +02:00
Jean-Christophe BEGUE
f7ffb81d9e SMTP configuration parsing and checking 2018-08-13 13:46:32 +02:00
Miroslav Prasil
626a3c93ba Revert "Merge branch 'beta' of https://github.com/krankur/bitwarden_rs into beta"
This reverts commit 3fd3d8d5e9.
2018-08-13 12:35:41 +01:00
Miroslav Prasil
c0f554311b Extend documentation 2018-08-13 12:01:52 +01:00
Miroslav Prasil
3f5a99916a Implement update_revision trigger 2018-08-13 10:58:39 +01:00
Miroslav Prasil
b5a057f063 Merge branch 'master' into beta 2018-08-10 21:43:16 +01:00
Daniel García
e7e0717f5b Merge pull request #129 from krankur/beta
Implemented PUT for /two-factor/authenticator and /two-factor/disable #124
2018-08-10 22:20:44 +02:00
Kumar Ankur
3fd3d8d5e9 Merge branch 'beta' of https://github.com/krankur/bitwarden_rs into beta 2018-08-10 23:49:34 +05:30
Kumar Ankur
7b2de40beb Merge branch 'beta' of https://github.com/krankur/bitwarden_rs into beta 2018-08-10 23:26:55 +05:30
Kumar Ankur
5f6d721c09 Implemented PUT for /two-factor/authenticator and /two-factor/disable 2018-08-10 23:20:19 +05:30
Kumar Ankur
ddda86b90d Implemented bulk cipher share (share selected) #100 2018-08-10 23:20:19 +05:30
Daniel García
c6256e1455 Merge pull request #128 from mprasil/revision_date
Return revision date in milliseconds (fixes #127)
2018-08-10 19:40:56 +02:00
Daniel García
0cd3053fcb Merge pull request #125 from stammw/master
Make password hints available in the error message #85
2018-08-10 19:40:31 +02:00
Miroslav Prasil
58c1545707 Return revision date in milliseconds (fixes #127) 2018-08-10 17:18:59 +01:00
Jean-Christophe BEGUE
d3b4b10d18 Add an explanation to the password hint message #85 2018-08-10 16:59:23 +02:00
Jean-Christophe BEGUE
c031ae9f2f Make password hints available in the error message #85 2018-08-10 15:52:06 +02:00
Daniel García
672e3273cd Merge pull request #123 from mprasil/beta_patch
Fix patch file for v2.1.1
2018-08-09 16:40:29 +02:00
Miroslav Prasil
039860f87e Fix patch file for v2.1.1 2018-08-09 13:38:40 +01:00
Daniel García
9511456ded Merge pull request #116 from krankur/beta
Implemented bulk cipher share (share selected) #100
2018-08-09 01:44:16 +02:00
Daniel García
04b198a7e2 Merge pull request #120 from mprasil/vault_2.1.1
Update vault to latest version
2018-08-09 01:43:22 +02:00
Miroslav Prasil
73a1abed10 Update vault to latest version 2018-08-08 23:15:01 +02:00
Kumar Ankur
fb7b1c8c18 Implemented bulk cipher share (share selected) #100 2018-08-06 03:29:44 +05:30
Daniel García
8ffa7ebb6a Merge pull request #115 from krankur/beta
Implemented DELETE on 'api/ciphers' to delete selected ciphers (#98)
2018-08-03 16:29:23 +02:00
Kumar Ankur
aac1304b46 clean up 2018-08-03 19:31:01 +05:30
Kumar Ankur
7dfc759691 Implemented DELETE on 'api/ciphers' to delete selected ciphers (#98) 2018-08-03 19:23:38 +05:30
Daniel García
54afe0671e Merge pull request #111 from krankur/beta
Implemented PUT for single cipher sharing (#97)
2018-08-01 21:12:48 +02:00
Kumar Ankur
74e2ca81ae Implemented PUT for single cipher sharing (#97) 2018-08-02 00:07:14 +05:30
Daniel García
d6fadb52ff Merge pull request #109 from mprasil/beta_merge
Merge changes from master to beta
2018-08-01 12:57:32 +02:00
Miroslav Prasil
b163aeb8ca Merge changes in master to beta branch (concurrency fixes) 2018-08-01 11:37:42 +01:00
Daniel García
fcb479a457 Merge pull request #108 from krankur/beta
Implementing PUT for 'api/ciphers/move' (#99)
2018-08-01 11:49:14 +02:00
Kumar Ankur
0e095a9fa4 change to reuse the logic for POST in PUT as well 2018-08-01 13:50:52 +05:30
Kumar Ankur
2f6aa3c363 Reverting removal of 'api/ciphers/move' POST as it is required for backward compatibility 2018-08-01 11:21:05 +05:30
Kumar Ankur
fcc485384f clean up 2018-08-01 04:12:46 +05:30
Kumar Ankur
91a2319325 Implementing PUT for ciphers/move (#99) 2018-08-01 03:58:47 +05:30
Daniel García
56b3afa77c Merge pull request #107 from shauder/bug/attachments_for_orgs
Bug/attachments for orgs
2018-07-31 20:08:05 +02:00
Shane A. Faulkner
d335f45e34 Bump version to 0.12.0 2018-07-31 12:07:03 -05:00
Shane A. Faulkner
34d2648509 Merge pull request #3 from shauder/master
Sync working branch with changes in master upstream
2018-07-31 12:05:52 -05:00
Shane A. Faulkner
f39c4fe2f4 Merge pull request #2 from dani-garcia/master
Sync local fork with upstream
2018-07-31 12:03:39 -05:00
Shane A. Faulkner
01875c395b Merge pull request #1 from mprasil/concurrency_fix
WAL journal mode and delete retry added
2018-07-31 11:39:45 -05:00
Miroslav Prasil
2872f40d13 WAL journal mode and delete retry added 2018-07-31 16:43:43 +01:00
Daniel García
07a30c8334 Merge pull request #106 from mprasil/beta_stable
Use stable release of v2.0.0
2018-07-30 15:43:41 +02:00
Miroslav Prasil
ceb3d0314d Use stable release of v2.0.0 2018-07-27 10:01:33 +01:00
mprasil
d7df545078 Merge pull request #104 from jcgruenhage/patch-1
Update matrix.to link in the README
2018-07-26 22:52:12 +01:00
Jan Christian Grünhage
d073f06652 Update matrix.to link in the README
Using the room ID instead of an alias isn't supposed to work for joining rooms, and doesn't work when joining over federation. It only works when your server is already participating in the room.
2018-07-26 22:42:02 +01:00
Daniel García
3726da9c14 Merge pull request #103 from mprasil/https_doc_fix
Fixed the documentation for https (resolves #101)
2018-07-24 15:28:23 +02:00
Miroslav Prasil
51450a0df9 Fixed the documentation for https (resolves #101) 2018-07-24 12:32:41 +01:00
Daniel García
659f677897 Add missing slash, to put it like it was at first 2018-07-21 18:50:54 +02:00
Daniel García
a291dea16f Updated dependencies and Docker image to new web-vault 2018-07-21 17:27:00 +02:00
Shane A. Faulkner
98bae4a0a1 Cleanup and working with 2 or less attachments 2018-07-18 15:35:45 -05:00
Daniel García
48e69cebab Merge pull request #92 from mprasil/not_found
Return 404 in case the path doesn't match instead of 500
2018-07-18 14:07:28 +02:00
Daniel García
798a3b6a43 Merge pull request #91 from mprasil/worker_threads
Change number of workers in image, document the setting (fixes #90)
2018-07-18 14:06:53 +02:00
Miroslav Prasil
2dc1427027 Bump the version 2018-07-18 12:04:48 +01:00
Miroslav Prasil
233d23a527 Return 404 in case the path doesn't match instead of 500 2018-07-18 11:54:33 +01:00
Miroslav Prasil
06f7bd7c97 Change number of workers in image, document the setting (fixes #90) 2018-07-18 10:41:39 +01:00
Daniel García
458a238c38 Merge pull request #89 from mprasil/unconfirmed_guard
Add confirmed check to the OrgHeaders request guard
2018-07-17 11:54:13 +02:00
Miroslav Prasil
de72655bb1 Add confirmed check to the OrgHeaders request guard 2018-07-16 10:23:45 +01:00
Daniel García
4a2350891a Merge pull request #84 from mqus/patch-2
Reflect changes in Archlinux packaging
2018-07-15 12:04:28 +02:00
mqus
4677ae4ac6 Reflect changes in Archlinux packaging
I changed the way bitwarden_rs is packaged (the web interface is now an add-on package instead of bundled) and added a 'stable' package which follows recent releases.
I assume that following releases instead of the master branch is encouraged, so I removed the link to the (still existing) bitwarden_rs-git package, which does the latter.
2018-07-15 00:42:17 +02:00
Shane A. Faulkner
31349a47d3 Very dirty addition of missing api's 2018-07-14 01:09:20 -05:00
Daniel García
55b7a3e4d1 Merge pull request #82 from mprasil/not_accepted_user
Do not show organization stuff to not accepted user
2018-07-13 18:42:38 +02:00
Miroslav Prasil
692ed81306 Do not show organization stuff to not accepted user 2018-07-13 17:21:19 +01:00
Daniel García
03172a6cd7 Bump version to 0.10.0 2018-07-13 16:06:01 +02:00
Daniel García
819622e310 Documented U2F, removed debug prints, and documented missing features 2018-07-13 15:58:50 +02:00
Daniel García
970863ffb1 Set facets contentType 2018-07-13 15:05:00 +02:00
Daniel García
e876d3077a Merge remote-tracking branch 'origin/master' into u2f 2018-07-13 13:59:45 +02:00
Daniel García
99d6742fac Merge pull request #81 from mprasil/readme_update
Readme updates (chat, arch, TOC)
2018-07-13 13:58:50 +02:00
Daniel García
75615bb5c8 Ignore U2F challenge if not provided. Also checked that error_code has to be 0 for a successful registration 2018-07-13 12:37:46 +02:00
Miroslav Prasil
e271b246f3 Readme updates (chat, arch, TOC) 2018-07-13 10:48:49 +01:00
Daniel García
6378d96d1a Add some extra debug prints 2018-07-13 11:07:20 +02:00
Daniel García
c722256cbd Remove debug print 2018-07-13 00:40:59 +02:00
Daniel García
8ff50481e5 Use X-Forwarded-Host if available 2018-07-13 00:33:28 +02:00
Daniel García
be4e6c6f0c Merge branch 'master' into u2f 2018-07-12 23:54:56 +02:00
Daniel García
2f892cb866 Hide org ciphers from unconfirmed users (showed decryption error) 2018-07-12 23:45:41 +02:00
Daniel García
4f6f510bd4 Improve domain detection, should fix attachment problems. Otherwise, set the DOMAIN env variable to the correct domain 2018-07-12 23:28:16 +02:00
Daniel García
dae92b9018 Implemented U2F, refactored Two Factor authentication, registering U2F device and authentication should work. Works on Chrome on MacOS with a virtual device. 2018-07-12 22:22:10 +02:00
mprasil
dde7c0d99b Merge pull request #74 from mprasil/env_fix
Move the ROCKET_ENV to the runtime image
2018-07-12 13:50:33 +01:00
Miroslav Prasil
79fccccad7 Move the ROCKET_ENV to the runtime image 2018-07-12 13:25:15 +01:00
mprasil
470ad14616 Merge pull request #72 from mqus/patch-1
Add a link to Archlinux AUR packages
2018-07-12 10:53:17 +01:00
Markus Richter
8d13e759fa Merge branch 'master' into patch-1 2018-07-12 11:25:45 +02:00
mqus
3bba02b364 Add aur links to BUILD.md 2018-07-12 11:19:30 +02:00
mqus
251c5c2348 remove aur link in README 2018-07-12 11:09:58 +02:00
mqus
f718827693 Add a link to the packaged AUR version
Hi!
I made two packages for Archlinux, one for the purists (without web interface) and one with the web interface and wanted to link to them here.
I'm not sure if that is the best place to advertise this; let me know if you think the links are better put elsewhere.

Apart from that: If you have any other suggestions/remarks for repackaging, please tell me. 

Currently I'm linking to the latest master commit, but I would really welcome it if you could make a release every now and then.
Thanks for your great work!
2018-07-12 02:23:12 +02:00
Daniel García
869352c361 Merge pull request #70 from dustyrip/dustyrip-patch-1
Enable user to change ROCKET_ENV for container
2018-07-11 21:17:45 +02:00
dustyrip
ca31f117d5 Allow users to specify ROCKET_ENV when starting container 2018-07-11 18:56:28 +00:00
Daniel García
1cb67eee69 Implement leave organization (accessed from the bottom of the user's settings page) 2018-07-11 16:30:03 +02:00
Daniel García
e88d8c856d Change host url to https when it's enabled, should fix some problems downloading attachments 2018-07-11 16:23:39 +02:00
Daniel García
ec37004dfe Merge pull request #68 from mprasil/unprivileged
Document running container with lower privileges (fixes #66)
2018-07-11 16:20:56 +02:00
Miroslav Prasil
03ce42e1cf Document running container with lower privileges (fixes #66) 2018-07-11 11:31:13 +01:00
Daniel García
3f56730b8a Merge pull request #65 from laxmanpradhan/master
updated README to include backup information
2018-07-10 13:41:06 +02:00
laxmanpradhan
57701d5213 typo 2018-07-09 14:51:13 -07:00
laxmanpradhan
f920441b28 updated key files info 2018-07-09 14:48:51 -07:00
laxmanpradhan
203fb2e3e7 formatting for headings 2018-07-09 14:44:00 -07:00
laxmanpradhan
3c662de4f2 updated to include backup information 2018-07-09 14:41:12 -07:00
Daniel García
b1d1926249 Reorganized build instructions 2018-07-09 15:09:30 +02:00
Daniel García
c5dd1a03be Merge pull request #64 from mprasil/master
Update readme for docker hub
2018-07-08 16:19:34 +02:00
Daniel García
df598d7208 Log possible errors when attaching file 2018-07-06 17:23:12 +02:00
Miroslav Prasil
a0ae032ea7 Update readme for docker hub 2018-07-04 17:35:00 +01:00
Daniel García
35b4ad69bd Remove unused warnings 2018-07-04 14:27:47 +02:00
Daniel García
dfb348d630 Install CA certificates to make HTTPS connections work 2018-07-01 17:40:02 +02:00
Daniel García
22786c8c9d Merge pull request #55 from mprasil/debug_prints
Remove some extra debug prints
2018-07-01 16:02:18 +02:00
Daniel García
a1ffa4c28d Allow TOTP generation in organizations (Fixes #50) 2018-07-01 15:49:52 +02:00
Miroslav Prasil
9f8183deb0 Remove some extra debug prints 2018-07-01 14:48:18 +01:00
Daniel García
ea600ab2b8 Don't ignore errors while downloading icons 2018-07-01 15:27:42 +02:00
Daniel García
83da757dfb Merge pull request #54 from mprasil/delete-admin
Implement delete-admin call
2018-07-01 14:45:48 +02:00
Miroslav Prasil
d84d8d756f Implement delete-admin call 2018-07-01 12:43:11 +01:00
Daniel García
4fcdf33621 Merge pull request #47 from mprasil/limits
Add limits configuration to the readme
2018-06-30 17:48:34 +02:00
Miroslav Prasil
400a17a1ce Add limits configuration to the readme 2018-06-30 11:42:35 +01:00
Daniel García
15833e8d95 Added Rocket.toml to the final docker image 2018-06-30 01:51:53 +02:00
Daniel García
7d01947173 Updated dependencies and rust version 2018-06-29 23:18:19 +02:00
Daniel García
6aab2ae6c8 Document configuration a bit and increase JSON size limit to 10MB 2018-06-29 23:11:15 +02:00
Daniel García
64ac81b9ee Added unofficial note and better organized the README file 2018-06-29 14:47:23 +02:00
Daniel García
7c316fc19a Added security headers to web-vault (fixes #44) 2018-06-25 20:35:36 +02:00
Daniel García
1c45c2ec3a Implemented API endpoints to modify profile name and hint, and to change email address, fixes #43 2018-06-17 00:08:05 +02:00
Daniel García
0905355629 Fix wrong case in import struct, invite collections and user Uri back-compat 2018-06-13 14:39:29 +02:00
Daniel García
f24e754ff7 Merge pull request #41 from mprasil/toolchain_build
Use proper toolchain in Dockerfile
2018-06-13 12:08:06 +02:00
Miroslav Prasil
0260667f7a Use proper toolchain in Dockerfile 2018-06-12 23:48:18 +01:00
Daniel García
7983ce4f13 Updated global domains file 2018-06-12 23:24:49 +02:00
Daniel García
5fc0472d88 Removed unneeded cipher code for changing case (fixed by last commit) 2018-06-12 23:15:27 +02:00
Daniel García
410ee9f1f7 Fixed case problems, hopefully this time for real 2018-06-12 23:01:14 +02:00
Daniel García
538dc00234 Improved configuration and documented options. Implemented option to disable web vault and to disable the use of bitwarden's official icon servers 2018-06-12 21:09:42 +02:00
Daniel García
515c84d74d Fixed casing issue 2018-06-12 18:01:11 +02:00
Daniel García
f72efa899e Updated dependencies and created 'rust-toolchain', to mark a working nightly to rustup users, and hopefully avoid some nightly breakage. 2018-06-12 17:30:36 +02:00
Daniel García
483066b9a0 Some style changes, removed useless matches and formats 2018-06-11 15:44:37 +02:00
Daniel García
57850a3379 Fix SSN field in Identity cipher not loading correctly
It needs to be all uppercase otherwise the web vault doesn't load it
2018-06-01 23:16:10 +02:00
Daniel García
3b09750b76 Merge pull request #39 from mprasil/vault_update
Update Vault to 1.27.0
2018-06-01 23:11:10 +02:00
Miroslav Prasil
0da4a8fc8a Update Vault to 1.27.0 2018-06-01 21:24:23 +01:00
49 changed files with 4666 additions and 1387 deletions

.env (52 changed lines)

@@ -1,13 +1,55 @@
## Bitwarden_RS Configuration File
## Uncomment any of the following lines to change the defaults
## Main data folder
# DATA_FOLDER=data
## Individual folders, these override %DATA_FOLDER%
# DATABASE_URL=data/db.sqlite3
# PRIVATE_RSA_KEY=data/private_rsa_key.der
# PUBLIC_RSA_KEY=data/public_rsa_key.der
# RSA_KEY_FILENAME=data/rsa_key
# ICON_CACHE_FOLDER=data/icon_cache
# ATTACHMENTS_FOLDER=data/attachments
# true for yes, anything else for no
SIGNUPS_ALLOWED=true
## Web vault settings
# WEB_VAULT_FOLDER=web-vault/
# WEB_VAULT_ENABLED=true
# ROCKET_ENV=production
## Controls the WebSocket server port
# WEBSOCKET_PORT=3012
## Controls if new users can register
# SIGNUPS_ALLOWED=true
## Use a local favicon extractor
## Set to false to use bitwarden's official icon servers
## Set to true to use the local version, which is not as smart,
## but it doesn't send the cipher domains to bitwarden's servers
# LOCAL_ICON_EXTRACTOR=false
## Controls the PBKDF2 password iterations to apply on the server
## The change only applies when the password is changed
# PASSWORD_ITERATIONS=100000
## Whether the password hint should be sent in the error response when the client requests it
# SHOW_PASSWORD_HINT=true
## Domain settings
## The domain must match the address from where you access the server
## Unless you are using U2F, or having problems with attachments not downloading, there is no need to change this
## For U2F to work, the server must use HTTPS, you can use Let's Encrypt for free certs
# DOMAIN=https://bw.domain.tld:8443
## Rocket specific settings, check Rocket documentation to learn more
# ROCKET_ENV=staging
# ROCKET_ADDRESS=0.0.0.0 # Enable this to test mobile app
# ROCKET_PORT=8000
# ROCKET_TLS={certs="/path/to/certs.pem",key="/path/to/key.pem"}
## Mail specific settings, set SMTP_HOST and SMTP_FROM to enable the mail service.
## Note: if SMTP_USERNAME is specified, SMTP_PASSWORD is mandatory
# SMTP_HOST=smtp.domain.tld
# SMTP_FROM=bitwarden-rs@domain.tld
# SMTP_PORT=587
# SMTP_SSL=true
# SMTP_USERNAME=username
# SMTP_PASSWORD=password

.travis.yml (new file, 7 lines)

@@ -0,0 +1,7 @@
# Copied from Rocket's .travis.yml
language: rust
sudo: required # so we get a VM with higher specs
dist: trusty # so we get a VM with higher specs
cache: cargo
rust:
- nightly

BUILD.md (new file, 70 lines)

@@ -0,0 +1,70 @@
# Build instructions
## Dependencies
- `Rust nightly` (strongly recommended to use [rustup](https://rustup.rs/))
- `OpenSSL` (should be available in path, install through your system's package manager or use the [prebuilt binaries](https://wiki.openssl.org/index.php/Binaries))
- `NodeJS` (required to build the web-vault; install through your system's package manager or use the [prebuilt binaries](https://nodejs.org/en/download/))
## Run/Compile
```sh
# Compile and run
cargo run
# or just compile (binary located in target/release/bitwarden_rs)
cargo build --release
```
When run, the server is accessible at [http://localhost:80](http://localhost:80).
### Install the web-vault
Clone the git repository at [bitwarden/web](https://github.com/bitwarden/web) and checkout the latest release tag (e.g. v2.1.1):
```sh
# clone the repository
git clone https://github.com/bitwarden/web.git web-vault
cd web-vault
# switch to the latest tag
git checkout "$(git tag | tail -n1)"
```
Apply the patch file from `docker/set-vault-baseurl.patch`:
```sh
# In the Vault repository directory
git apply /path/to/bitwarden_rs/docker/set-vault-baseurl.patch
```
Then, build the Vault:
```sh
npm run sub:init
npm install
npm run dist
```
Finally copy the contents of the `build` folder into the `bitwarden_rs/web-vault` folder.
# Configuration
The available configuration options are documented in the default `.env` file, and they can be modified by uncommenting the desired options in that file or by setting their respective environment variables. Look at the README file for the main configuration options available.
Note: the environment variables override the values set in the `.env` file.
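For example, a single option can be overridden at launch without editing `.env`. A minimal sketch (`SIGNUPS_ALLOWED` is one of the options documented in that file; the binary path follows the build step above):
```sh
# Environment variables take precedence over .env:
# disable signups for this run only
SIGNUPS_ALLOWED=false ./target/release/bitwarden_rs
```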
## How to recreate database schemas (for developers)
Install diesel-cli with cargo:
```sh
cargo install diesel_cli --no-default-features --features sqlite-bundled
```
Make sure that the correct path to the database is in the `.env` file.
If you want to modify the schemas, create a new migration with:
```
diesel migration generate <name>
```
Modify the `*.sql` files, making sure that any changes are reverted in the `down.sql` file.
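For instance, a migration pair might look like the following sketch (the `example` table and the migration name are hypothetical, purely to illustrate that `down.sql` must undo `up.sql`):
```sql
-- migrations/2018-09-01-000000_create_example/up.sql (hypothetical)
CREATE TABLE example (
  uuid TEXT NOT NULL PRIMARY KEY,
  name TEXT NOT NULL
);

-- migrations/2018-09-01-000000_create_example/down.sql must revert it
DROP TABLE example;
```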
Apply the migrations and save the generated schemas as follows:
```sh
diesel migration redo
# This step should be done automatically when using diesel-cli > 1.3.0
# diesel print-schema > src/db/schema.rs
```

Cargo.lock (generated, 1684 changed lines; diff suppressed because it is too large)

Cargo.toml

@@ -1,40 +1,49 @@
[package]
name = "bitwarden_rs"
version = "0.9.0"
version = "1.0.0"
authors = ["Daniel García <dani-garcia@users.noreply.github.com>"]
[dependencies]
# Web framework for nightly with a focus on ease-of-use, expressibility, and speed.
rocket = { version = "0.3.12", features = ["tls"] }
rocket_codegen = "0.3.12"
rocket_contrib = "0.3.12"
rocket = { version = "0.3.16", features = ["tls"] }
rocket_codegen = "0.3.16"
rocket_contrib = "0.3.16"
# HTTP client
reqwest = "0.8.6"
reqwest = "0.9.0"
# multipart/form-data support
multipart = "0.14.2"
multipart = "0.15.3"
# WebSockets library
ws = "0.7.8"
# MessagePack library
rmpv = "0.4.0"
# Concurrent hashmap implementation
chashmap = "2.2.0"
# A generic serialization/deserialization framework
serde = "1.0.64"
serde_derive = "1.0.64"
serde_json = "1.0.19"
serde = "1.0.79"
serde_derive = "1.0.79"
serde_json = "1.0.28"
# A safe, extensible ORM and Query builder
diesel = { version = "~1.2.2", features = ["sqlite", "chrono", "r2d2"] }
diesel_migrations = { version = "~1.2.0", features = ["sqlite"] }
diesel = { version = "1.3.3", features = ["sqlite", "chrono", "r2d2"] }
diesel_migrations = { version = "1.3.0", features = ["sqlite"] }
# Bundled SQLite
libsqlite3-sys = { version = "0.9.1", features = ["bundled"] }
libsqlite3-sys = { version = "0.9.3", features = ["bundled"] }
# Crypto library
ring = { version = "= 0.11.0", features = ["rsa_signing"] }
# UUID generation
uuid = { version = "0.6.5", features = ["v4"] }
uuid = { version = "0.7.1", features = ["v4"] }
# Date and time library for Rust
chrono = "0.4.2"
chrono = "0.4.6"
# TOTP library
oath = "0.10.2"
@@ -45,11 +54,34 @@ data-encoding = "2.1.1"
# JWT library
jsonwebtoken = "= 4.0.1"
# U2F library
u2f = "0.1.2"
# A `dotenv` implementation for Rust
dotenv = { version = "0.13.0", default-features = false }
# Lazy static macro
lazy_static = "1.0.1"
lazy_static = "1.1.0"
# Numerical libraries
num-traits = "0.2.6"
num-derive = "0.2.2"
# Email libraries
lettre = "0.9.0"
lettre_email = "0.9.0"
native-tls = "0.2.1"
fast_chemail = "0.9.5"
# Number encoding library
byteorder = "1.2.6"
[patch.crates-io]
jsonwebtoken = { path = "libs/jsonwebtoken" } # Make jwt use ring 0.11, to match rocket
# Make jwt use ring 0.11, to match rocket
jsonwebtoken = { path = "libs/jsonwebtoken" }
rmp = { git = 'https://github.com/dani-garcia/msgpack-rust' }
lettre = { git = 'https://github.com/lettre/lettre', rev = 'fc91bb6ee8f9a' }
lettre_email = { git = 'https://github.com/lettre/lettre', rev = 'fc91bb6ee8f9a' }
# Version 0.1.2 from crates.io lacks a commit that fixes a certificate error
u2f = { git = 'https://github.com/wisespace-io/u2f-rs', rev = '193de35093a44' }

Dockerfile

@@ -2,36 +2,32 @@
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
FROM node:9-alpine as vault
FROM node:8-alpine as vault
ENV VAULT_VERSION "1.26.0"
ENV URL "https://github.com/bitwarden/web/archive/v${VAULT_VERSION}.tar.gz"
ENV VAULT_VERSION "v2.3.0"
ENV URL "https://github.com/bitwarden/web.git"
RUN apk add --update-cache --upgrade \
curl \
git \
tar \
&& npm install -g \
gulp-cli \
gulp
RUN mkdir /web-build \
&& cd /web-build \
&& curl -L "${URL}" | tar -xvz --strip-components=1
tar
RUN git clone -b $VAULT_VERSION --depth 1 $URL web-build
WORKDIR /web-build
COPY /docker/settings.Production.json /web-build/
COPY /docker/set-vault-baseurl.patch /web-build/
RUN git apply set-vault-baseurl.patch
RUN git config --global url."https://github.com/".insteadOf ssh://git@github.com/ \
&& npm install \
&& gulp dist:selfHosted \
&& mv dist /web-vault
RUN npm run sub:init && npm install
RUN npm run dist \
&& mv build /web-vault
########################## BUILD IMAGE ##########################
# We need to use the Rust build image, because
# we need the Rust compiler and Cargo tooling
FROM rustlang/rust:nightly as build
FROM rust as build
# Using bundled SQLite, no need to install it
# RUN apt-get update && apt-get install -y\
@@ -46,6 +42,7 @@ WORKDIR /app
# Copies over *only* your manifests and vendored dependencies
COPY ./Cargo.* ./
COPY ./libs ./libs
COPY ./rust-toolchain ./rust-toolchain
# Builds your dependencies and removes the
# dummy project, except the target folder
@@ -66,23 +63,27 @@ RUN cargo build --release
# because we already have a binary built
FROM debian:stretch-slim
ENV ROCKET_ENV "staging"
ENV ROCKET_WORKERS=10
# Install needed libraries
RUN apt-get update && apt-get install -y\
openssl\
ca-certificates\
--no-install-recommends\
&& rm -rf /var/lib/apt/lists/*
RUN mkdir /data
VOLUME /data
EXPOSE 80
EXPOSE 3012
# Copies the files from the context (env file and web-vault)
# and the binary from the "build" stage to the current stage
COPY .env .
COPY Rocket.toml .
COPY --from=vault /web-vault ./web-vault
COPY --from=build app/target/release/bitwarden_rs .
# Configures the startup!
# Use production to disable Rocket logging
#CMD ROCKET_ENV=production ./bitwarden_rs
CMD ROCKET_ENV=staging ./bitwarden_rs
CMD ./bitwarden_rs

Dockerfile.alpine (new file, 81 lines)

@@ -0,0 +1,81 @@
# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
FROM node:8-alpine as vault
ENV VAULT_VERSION "v2.3.0"
ENV URL "https://github.com/bitwarden/web.git"
RUN apk add --update-cache --upgrade \
curl \
git \
tar
RUN git clone -b $VAULT_VERSION --depth 1 $URL web-build
WORKDIR /web-build
COPY /docker/set-vault-baseurl.patch /web-build/
RUN git apply set-vault-baseurl.patch
RUN npm run sub:init && npm install
RUN npm run dist \
&& mv build /web-vault
########################## BUILD IMAGE ##########################
# Musl build image for statically compiled binary
FROM clux/muslrust:nightly-2018-08-24 as build
# Creates a dummy project used to grab dependencies
RUN USER=root cargo init --bin
# Copies over *only* your manifests and vendored dependencies
COPY ./Cargo.* ./
COPY ./libs ./libs
COPY ./rust-toolchain ./rust-toolchain
# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN cargo build --release
RUN find . -not -path "./target*" -delete
# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .
# Builds again, this time it'll just be
# your actual source files being built
RUN cargo build --release
######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM alpine:3.8
ENV ROCKET_ENV "staging"
ENV ROCKET_WORKERS=10
ENV SSL_CERT_DIR=/etc/ssl/certs
# Install needed libraries
RUN apk add \
openssl\
ca-certificates \
&& rm /var/cache/apk/*
RUN mkdir /data
VOLUME /data
EXPOSE 80
EXPOSE 3012
# Copies the files from the context (env file and web-vault)
# and the binary from the "build" stage to the current stage
COPY .env .
COPY Rocket.toml .
COPY --from=vault /web-vault ./web-vault
COPY --from=build /volume/target/x86_64-unknown-linux-musl/release/bitwarden_rs .
# Configures the startup!
CMD ./bitwarden_rs

Dockerfile.armv7 (new file, 113 lines)

@@ -0,0 +1,113 @@
# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
FROM node:8-alpine as vault
ENV VAULT_VERSION "v2.3.0"
ENV URL "https://github.com/bitwarden/web.git"
RUN apk add --update-cache --upgrade \
curl \
git \
tar
RUN git clone -b $VAULT_VERSION --depth 1 $URL web-build
WORKDIR /web-build
COPY /docker/set-vault-baseurl.patch /web-build/
RUN git apply set-vault-baseurl.patch
RUN npm run sub:init && npm install
RUN npm run dist \
&& mv build /web-vault
########################## BUILD IMAGE ##########################
# We need to use the Rust build image, because
# we need the Rust compiler and Cargo tooling
FROM rust as build
RUN apt-get update \
&& apt-get install -y \
gcc-arm-linux-gnueabihf \
&& mkdir -p ~/.cargo \
&& echo '[target.armv7-unknown-linux-gnueabihf]' >> ~/.cargo/config \
&& echo 'linker = "arm-linux-gnueabihf-gcc"' >> ~/.cargo/config
ENV CARGO_HOME "/root/.cargo"
ENV USER "root"
# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin app
WORKDIR /app
# Copies over *only* your manifests and vendored dependencies
COPY ./Cargo.* ./
COPY ./libs ./libs
COPY ./rust-toolchain ./rust-toolchain
# Prepare openssl armhf libs
RUN sed 's/^deb/deb-src/' /etc/apt/sources.list > \
/etc/apt/sources.list.d/deb-src.list \
&& dpkg --add-architecture armhf \
&& apt-get update \
&& apt-get install -y \
libssl-dev:armhf \
libc6-dev:armhf
ENV CC_armv7_unknown_linux_gnueabihf="/usr/bin/arm-linux-gnueabihf-gcc"
ENV CROSS_COMPILE="1"
ENV OPENSSL_INCLUDE_DIR="/usr/include/arm-linux-gnueabihf"
ENV OPENSSL_LIB_DIR="/usr/lib/arm-linux-gnueabihf"
# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
COPY . .
RUN rustup target add armv7-unknown-linux-gnueabihf
RUN cargo build --release --target=armv7-unknown-linux-gnueabihf -v
RUN find . -not -path "./target*" -delete
# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .
# Builds again, this time it'll just be
# your actual source files being built
RUN cargo build --release --target=armv7-unknown-linux-gnueabihf -v
######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM resin/armv7hf-debian:stretch
ENV ROCKET_ENV "staging"
ENV ROCKET_WORKERS=10
RUN [ "cross-build-start" ]
# Install needed libraries
RUN apt-get update && apt-get install -y\
openssl\
ca-certificates\
--no-install-recommends\
&& rm -rf /var/lib/apt/lists/*
RUN mkdir /data
RUN [ "cross-build-end" ]
VOLUME /data
EXPOSE 80
# Copies the files from the context (env file and web-vault)
# and the binary from the "build" stage to the current stage
COPY .env .
COPY Rocket.toml .
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/armv7-unknown-linux-gnueabihf/release/bitwarden_rs .
# Configures the startup!
CMD ./bitwarden_rs

PROXY.md (new file, 95 lines)

@@ -0,0 +1,95 @@
# Proxy examples
In this document, `<SERVER>` refers to the IP or domain from which bitwarden_rs is accessible. If both the proxy and bitwarden_rs are running on the same system, simply use `localhost`.
The ports proxied by default are `80` for the web server and `3012` for the WebSocket server. The proxies are configured to listen on port `443` with HTTPS enabled, which is recommended.
When using a proxy, it's preferable to configure HTTPS at the proxy level rather than at the application level; this way the WebSocket connection is also secured.
## Caddy
```nginx
localhost:443 {
# The negotiation endpoint is also proxied to Rocket
proxy /notifications/hub/negotiate <SERVER>:80 {
transparent
}
# Notifications redirected to the websockets server
proxy /notifications/hub <SERVER>:3012 {
websocket
}
# Proxy the Root directory to Rocket
proxy / <SERVER>:80 {
transparent
}
tls ${SSLCERTIFICATE} ${SSLKEY}
}
```
## Nginx (by shauder)
```nginx
server {
include conf.d/ssl/ssl.conf;
listen 443 ssl http2;
server_name vault.*;
location /notifications/hub/negotiate {
include conf.d/proxy-confs/proxy.conf;
proxy_pass http://<SERVER>:80;
}
location / {
include conf.d/proxy-confs/proxy.conf;
proxy_pass http://<SERVER>:80;
}
location /notifications/hub {
proxy_pass http://<SERVER>:3012;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
}
}
```
## Apache (by fbartels)
```apache
<VirtualHost *:443>
SSLEngine on
ServerName bitwarden.$hostname.$domainname
SSLCertificateFile ${SSLCERTIFICATE}
SSLCertificateKeyFile ${SSLKEY}
SSLCACertificateFile ${SSLCA}
${SSLCHAIN}
ErrorLog \${APACHE_LOG_DIR}/bitwarden-error.log
CustomLog \${APACHE_LOG_DIR}/bitwarden-access.log combined
RewriteEngine On
RewriteCond %{HTTP:Upgrade} =websocket [NC]
RewriteRule /(.*) ws://<SERVER>:3012/$1 [P,L]
ProxyPass / http://<SERVER>:80/
ProxyPreserveHost On
ProxyRequests Off
</VirtualHost>
```
## Traefik (docker-compose example)
```yaml
labels:
- 'traefik.frontend.rule=Host:vault.example.local'
- 'traefik.docker.network=traefik'
- 'traefik.port=80'
- 'traefik.enable=true'
- 'traefik.web.frontend.rule=Host:vault.example.local'
- 'traefik.web.port=80'
- 'traefik.hub.frontend.rule=Path:/notifications/hub'
- 'traefik.hub.port=3012'
- 'traefik.negotiate.frontend.rule=Path:/notifications/hub/negotiate'
- 'traefik.negotiate.port=80'
```
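These labels attach to the bitwarden service in a `docker-compose.yml`. A minimal sketch of the surrounding file (the image name follows the README examples; the external `traefik` network is an assumption matching the `traefik.docker.network` label):
```yaml
version: '2'

services:
  bitwarden:
    image: mprasil/bitwarden:latest
    volumes:
      - /bw-data/:/data/
    networks:
      - traefik   # assumed pre-existing network used by the Traefik container
    labels:
      - 'traefik.enable=true'
      - 'traefik.docker.network=traefik'
      - 'traefik.web.frontend.rule=Host:vault.example.local'
      - 'traefik.web.port=80'
      - 'traefik.hub.frontend.rule=Path:/notifications/hub'
      - 'traefik.hub.port=3012'
      - 'traefik.negotiate.frontend.rule=Path:/notifications/hub/negotiate'
      - 'traefik.negotiate.port=80'

networks:
  traefik:
    external: true
```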

README.md (497 changed lines)

@@ -1,97 +1,458 @@
## Easy setup (Docker)
Install Docker to your system and then, from the project root, run:
### This is a Bitwarden server API implementation written in Rust compatible with [upstream Bitwarden clients](https://bitwarden.com/#download)*, perfect for self-hosted deployment where running the official resource-heavy service might not be ideal.
---
[![Travis Build Status](https://travis-ci.org/dani-garcia/bitwarden_rs.svg?branch=master)](https://travis-ci.org/dani-garcia/bitwarden_rs)
[![Dependency Status](https://deps.rs/repo/github/dani-garcia/bitwarden_rs/status.svg)](https://deps.rs/repo/github/dani-garcia/bitwarden_rs)
[![GitHub Release](https://img.shields.io/github/release/dani-garcia/bitwarden_rs.svg)](https://github.com/dani-garcia/bitwarden_rs/releases/latest)
[![GPL-3.0 Licensed](https://img.shields.io/github/license/dani-garcia/bitwarden_rs.svg)](https://github.com/dani-garcia/bitwarden_rs/blob/master/LICENSE.txt)
[![Matrix Chat](https://matrix.to/img/matrix-badge.svg)](https://matrix.to/#/#bitwarden_rs:matrix.org)
Image is based on [Rust implementation of Bitwarden API](https://github.com/dani-garcia/bitwarden_rs).
_*Note that this project is not associated with the [Bitwarden](https://bitwarden.com/) project nor with 8bit Solutions LLC._
---
**Table of contents**
- [Features](#features)
- [Missing features](#missing-features)
- [Docker image usage](#docker-image-usage)
- [Starting a container](#starting-a-container)
- [Updating the bitwarden image](#updating-the-bitwarden-image)
- [Configuring bitwarden service](#configuring-bitwarden-service)
- [Disable registration of new users](#disable-registration-of-new-users)
- [Disable invitations](#disable-invitations)
- [Enabling HTTPS](#enabling-https)
- [Enabling WebSocket notifications](#enabling-websocket-notifications)
- [Enabling U2F authentication](#enabling-u2f-authentication)
- [Changing persistent data location](#changing-persistent-data-location)
- [/data prefix:](#data-prefix)
- [database name and location](#database-name-and-location)
- [attachments location](#attachments-location)
- [icons cache](#icons-cache)
- [Changing the API request size limit](#changing-the-api-request-size-limit)
- [Changing the number of workers](#changing-the-number-of-workers)
- [SMTP configuration](#smtp-configuration)
- [Password hint display](#password-hint-display)
- [Disabling or overriding the Vault interface hosting](#disabling-or-overriding-the-vault-interface-hosting)
- [Other configuration](#other-configuration)
- [Building your own image](#building-your-own-image)
- [Building binary](#building-binary)
- [Available packages](#available-packages)
- [Arch Linux](#arch-linux)
- [Backing up your vault](#backing-up-your-vault)
- [1. the sqlite3 database](#1-the-sqlite3-database)
- [2. the attachments folder](#2-the-attachments-folder)
- [3. the key files](#3-the-key-files)
- [4. Icon Cache](#4-icon-cache)
- [Running the server with non-root user](#running-the-server-with-non-root-user)
- [Differences from upstream API implementation](#differences-from-upstream-api-implementation)
- [Changing user email](#changing-user-email)
- [Creating organization](#creating-organization)
- [Inviting users into organization](#inviting-users-into-organization)
- [Running on unencrypted connection](#running-on-unencrypted-connection)
- [Get in touch](#get-in-touch)
## Features
A basically full implementation of the Bitwarden API is provided, including:
* Basic single user functionality
* Organizations support
* Attachments
* Vault API support
* Serving the static files for Vault interface
* Website icons API
* Authenticator and U2F support
## Missing features
* Email confirmation
* Other two-factor systems:
* YubiKey OTP (if your key supports U2F, you can use that)
* Duo
* Email codes
## Docker image usage
### Starting a container
The persistent data is stored under /data inside the container, so the only requirement for persistent deployment using Docker is to mount a persistent volume at that path:
```
docker run -d --name bitwarden -v /bw-data/:/data/ -p 80:80 mprasil/bitwarden:latest
```
This will preserve any persistent data under `/bw-data/`; you can adapt the path to whatever suits you.
The service will be exposed on port 80.
### Updating the bitwarden image
Updating is straightforward: just make sure to preserve the mounted volume. If you used the bind-mounted path as in the example above, you just need to `pull` the latest image, `stop` and `rm` the current container, and then start a new one the same way as before:
```sh
# Pull the latest version
docker pull mprasil/bitwarden:latest
# Stop and remove the old container
docker stop bitwarden
docker rm bitwarden
# Start new container with the data mounted
docker run -d --name bitwarden -v /bw-data/:/data/ -p 80:80 mprasil/bitwarden:latest
```
Then visit [http://localhost:80](http://localhost:80)
In case you didn't bind mount the volume for persistent data, you need an intermediate step where you preserve the data with an intermediate container:
```sh
# Pull the latest version
docker pull mprasil/bitwarden:latest
# Create intermediate container to preserve data
docker run --volumes-from bitwarden --name bitwarden_data busybox true
# Stop and remove the old container
docker stop bitwarden
docker rm bitwarden
# Start new container with the data mounted
docker run -d --volumes-from bitwarden_data --name bitwarden -p 80:80 mprasil/bitwarden:latest
# Optionally remove the intermediate container
docker rm bitwarden_data
# Alternatively, you can keep the data container around for future updates, in which case you can skip the last step.
```
## Configuring bitwarden service
### Disable registration of new users
By default new users can register; if you want to disable that, set the `SIGNUPS_ALLOWED` env variable to `false`:
```sh
docker run -d --name bitwarden \
-e SIGNUPS_ALLOWED=false \
-v /bw-data/:/data/ \
-p 80:80 \
mprasil/bitwarden:latest
```
Note: While users can't register on their own, they can still be invited by already registered users. Read below if you also want to disable that.
### Disable invitations
Even when registration is disabled, organization administrators or owners can invite users to join their organization. This won't send an email invitation to the users, but once they are invited, they can register with the invited email even if `SIGNUPS_ALLOWED` is set to `false`. You can disable this functionality completely by setting the `INVITATIONS_ALLOWED` env variable to `false`:
```sh
docker run -d --name bitwarden \
-e SIGNUPS_ALLOWED=false \
-e INVITATIONS_ALLOWED=false \
-v /bw-data/:/data/ \
-p 80:80 \
mprasil/bitwarden:latest
```
### Enabling HTTPS
To enable HTTPS, you need to configure the `ROCKET_TLS` option.
Its value must follow the format:
```
ROCKET_TLS={certs="/path/to/certs.pem",key="/path/to/key.pem"}
```
Where:
- certs: a path to a certificate chain in PEM format
- key: a path to a private key file in PEM format for the certificate in certs
```sh
docker run -d --name bitwarden \
-e ROCKET_TLS='{certs="/ssl/certs.pem",key="/ssl/key.pem"}' \
-v /ssl/keys/:/ssl/ \
-v /bw-data/:/data/ \
-p 443:80 \
mprasil/bitwarden:latest
```
Note that you need to mount the SSL files and forward the appropriate port.
Software used for obtaining certs often uses symlinks. If that is the case, both locations need to be accessible to the Docker container.
Example: [certbot](https://certbot.eff.org/) will create a folder that contains the needed `cert.pem` and `privkey.pem` files in `/etc/letsencrypt/live/mydomain/`.
These files are symlinked to `../../archive/mydomain/mykey.pem`.
So to use them from the bitwarden container:
```sh
docker run -d --name bitwarden \
-e ROCKET_TLS='{certs="/ssl/live/mydomain/cert.pem",key="/ssl/live/mydomain/privkey.pem"}' \
-v /etc/letsencrypt/:/ssl/ \
-v /bw-data/:/data/ \
-p 443:80 \
mprasil/bitwarden:latest
```
### Enabling WebSocket notifications
*Important: This does not apply to the mobile clients, which use push notifications.*
To enable WebSocket notifications, an external reverse proxy is necessary, and it must be configured to do the following:
- Route the `/notifications/hub` endpoint to the WebSocket server, by default at port `3012`, making sure to pass the `Connection` and `Upgrade` headers.
- Route everything else, including `/notifications/hub/negotiate`, to the standard Rocket server, by default at port `80`.
- If using Docker, you may need to map both ports with the `-p` flag
Example configurations are included in the [PROXY.md](https://github.com/dani-garcia/bitwarden_rs/blob/master/PROXY.md) file.
Note: The reason for this workaround is the lack of support for WebSockets from Rocket (though [it's a planned feature](https://github.com/SergioBenitez/Rocket/issues/90)), which forces us to launch a secondary server on a separate port.
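For reference, a minimal nginx sketch of those routing rules, to be placed inside your `server` block, assuming bitwarden_rs runs on `localhost` with the default ports (the complete, contributed examples are in PROXY.md):
```nginx
# Negotiation endpoint is handled by the Rocket server
location /notifications/hub/negotiate {
    proxy_pass http://localhost:80;
}

# WebSocket endpoint goes to the notifications server,
# passing the Connection and Upgrade headers
location /notifications/hub {
    proxy_pass http://localhost:3012;
    proxy_set_header Upgrade $http_upgrade;
    proxy_set_header Connection "upgrade";
}

# Everything else goes to the Rocket server
location / {
    proxy_pass http://localhost:80;
}
```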
### Enabling U2F authentication
To enable U2F authentication, you must be serving bitwarden_rs from an HTTPS domain with a valid certificate (either using the included HTTPS options or a reverse proxy). We recommend using a free certificate from Let's Encrypt.
After that, you need to set the `DOMAIN` environment variable to the same address from where bitwarden_rs is being served:
```sh
docker run -d --name bitwarden \
-e DOMAIN=https://bw.domain.tld \
-v /bw-data/:/data/ \
-p 80:80 \
mprasil/bitwarden:latest
```
Note that the value has to include `https://`, and it may include a port at the end (in the format `https://bw.domain.tld:port`) when not using `443`.
### Changing persistent data location
#### /data prefix
By default, all persistent data is saved under `/data`; you can override this path by setting the `DATA_FOLDER` env variable:
```sh
docker run -d --name bitwarden \
-e DATA_FOLDER=/persistent \
-v /bw-data/:/persistent/ \
-p 80:80 \
mprasil/bitwarden:latest
```
Notice that you need to adapt your volume mount accordingly.
#### database name and location
The default is `$DATA_FOLDER/db.sqlite3`; you can change the path specifically for the database using the `DATABASE_URL` variable:
```sh
docker run -d --name bitwarden \
-e DATABASE_URL=/database/bitwarden.sqlite3 \
-v /bw-data/:/data/ \
-v /bw-database/:/database/ \
-p 80:80 \
mprasil/bitwarden:latest
```
Note that you need to remember to mount volumes for both the database and the other persistent data if they live in different locations.
#### attachments location
The default is `$DATA_FOLDER/attachments`; you can change the path using the `ATTACHMENTS_FOLDER` variable:
```sh
docker run -d --name bitwarden \
-e ATTACHMENTS_FOLDER=/attachments \
-v /bw-data/:/data/ \
-v /bw-attachments/:/attachments/ \
-p 80:80 \
mprasil/bitwarden:latest
```
Note that you need to remember to mount volumes for both the attachments and the other persistent data if they live in different locations.
#### icons cache
The default is `$DATA_FOLDER/icon_cache`; you can change the path using the `ICON_CACHE_FOLDER` variable:
```sh
docker run -d --name bitwarden \
-e ICON_CACHE_FOLDER=/icon_cache \
-v /bw-data/:/data/ \
-v /icon_cache/ \
-p 80:80 \
mprasil/bitwarden:latest
```
Note that in the above example we don't mount the volume locally, which means it won't be persisted across upgrades unless you use an intermediate data container via `--volumes-from` (a sketch follows). This will impact performance, as bitwarden will have to re-download the icons on restart, but it might save you from having stale icons in the cache, since they are not automatically cleaned.
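As a sketch of that intermediate data container approach (the container names here are made up for illustration):
```sh
# Create a container whose only job is to own the /icon_cache volume
docker create -v /icon_cache --name bw_icon_cache mprasil/bitwarden:latest
# Reuse that volume so the cache survives container upgrades
docker run -d --name bitwarden \
-e ICON_CACHE_FOLDER=/icon_cache \
--volumes-from bw_icon_cache \
-v /bw-data/:/data/ \
-p 80:80 \
mprasil/bitwarden:latest
```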
### Changing the API request size limit
By default the API calls are limited to 10MB. This should be sufficient for most cases; however, if you want to support large imports, this might be limiting you. On the other hand, you might want to limit the request size to something smaller to prevent API abuse and possible DOS attacks, especially if running with limited resources.
To set the limit, you can use the `ROCKET_LIMITS` variable. The example here shows a 10MB limit for posted JSON in the body (this is the default):
```sh
docker run -d --name bitwarden \
-e ROCKET_LIMITS={json=10485760} \
-v /bw-data/:/data/ \
-p 80:80 \
mprasil/bitwarden:latest
```
### Changing the number of workers
When you run bitwarden_rs, it spawns `2 * <number of cpu cores>` workers to handle requests. On some systems this might lead to a low number of workers and hence slow performance, so the default in the docker image is changed to spawn 10 threads. You can override this setting to increase or decrease the number of workers by setting the `ROCKET_WORKERS` variable.
In the example below, we start with 20 workers:
```sh
docker run -d --name bitwarden \
-e ROCKET_WORKERS=20 \
-v /bw-data/:/data/ \
-p 80:80 \
mprasil/bitwarden:latest
```
### SMTP configuration
You can configure bitwarden_rs to send emails via an SMTP agent:
```sh
docker run -d --name bitwarden \
-e SMTP_HOST=<smtp.domain.tld> \
-e SMTP_FROM=<bitwarden@domain.tld> \
-e SMTP_PORT=587 \
-e SMTP_SSL=true \
-e SMTP_USERNAME=<username> \
-e SMTP_PASSWORD=<password> \
-v /bw-data/:/data/ \
-p 80:80 \
mprasil/bitwarden:latest
```
When `SMTP_SSL` is set to `true` (this is the default), only the TLSv1.1 and TLSv1.2 protocols will be accepted and `SMTP_PORT` will default to `587`. If set to `false`, `SMTP_PORT` will default to `25` and the connection won't be encrypted. This can be very insecure; use this setting only if you know what you're doing (see the sketch below).
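For example, a minimal unencrypted setup against a relay on a trusted internal network might look like this (the host name is an assumption):
```sh
# SMTP_PORT defaults to 25 when SMTP_SSL=false, so it is omitted here
docker run -d --name bitwarden \
-e SMTP_HOST=mail.internal.lan \
-e SMTP_FROM=bitwarden@domain.tld \
-e SMTP_SSL=false \
-v /bw-data/:/data/ \
-p 80:80 \
mprasil/bitwarden:latest
```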
### Password hint display
Usually, password hints are sent by email. But as bitwarden_rs is made with small or personal deployments in mind, hints are also available from the password hint page, so you don't have to configure an email service. If you want to disable this feature, you can use the `SHOW_PASSWORD_HINT` variable:
```sh
docker run -d --name bitwarden \
-e SHOW_PASSWORD_HINT=false \
-v /bw-data/:/data/ \
-p 80:80 \
mprasil/bitwarden:latest
```
### Disabling or overriding the Vault interface hosting
As a convenience, the bitwarden_rs image will also host the static files for the Vault web interface. You can disable this static file hosting completely by setting the `WEB_VAULT_ENABLED` variable to `false`:
```sh
docker run -d --name bitwarden \
-e WEB_VAULT_ENABLED=false \
-v /bw-data/:/data/ \
-p 80:80 \
mprasil/bitwarden:latest
```
Alternatively, you can override the Vault files and provide your own static files to host. You can do that by mounting a path with your files over the `/web-vault` directory in the container. Just make sure the directory contains at least an `index.html` file:
```sh
docker run -d --name bitwarden \
-v /path/to/static/files_directory:/web-vault \
-v /bw-data/:/data/ \
-p 80:80 \
mprasil/bitwarden:latest
```
Note that you can also change the path where bitwarden_rs looks for static files by setting the `WEB_VAULT_FOLDER` environment variable.
### Other configuration
Though this is unlikely to be required in a small deployment, you can fine-tune some other settings, like the number of workers, using environment variables that are processed by [Rocket](https://rocket.rs); please see the details in the [Rocket documentation](https://rocket.rs/guide/configuration/#environment-variables).
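For example, Rocket options follow the `ROCKET_<OPTION>` naming pattern, so a sketch tweaking the environment and logging level might look like this (the values are illustrative; check the Rocket documentation for the authoritative list):
```sh
docker run -d --name bitwarden \
-e ROCKET_ENV=production \
-e ROCKET_LOG=critical \
-v /bw-data/:/data/ \
-p 80:80 \
mprasil/bitwarden:latest
```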
## Building your own image
Clone the repository, then from the root of the repository run:
```sh
# Build the docker image:
docker build -t bitwarden_rs .
```
#### Other possible Docker options
To run the container in the background, add the `-d` parameter.
To check the logs when running in the background, run `docker logs bitwarden_rs`.
To stop a container running in the background, run `docker stop bitwarden_rs`.
To make sure the container is restarted automatically, add the `--restart unless-stopped` parameter.
To run the image with a docker volume:
```sh
docker volume create bw_data
docker run --name bitwarden_rs -t --init --rm --mount source=bw_data,target=/data -p 8000:80 bitwarden_rs
```
To run the image with a host bind instead, change the `--mount` parameter to:
```
--mount type=bind,source=<absolute_path>,target=/data
```
where `<absolute_path>` is an absolute path in the host's file system (e.g. `C:\bitwarden\data`).
## Building binary
For building the binary outside the Docker environment and running it locally without docker, please see the [build instructions](https://github.com/dani-garcia/bitwarden_rs/blob/master/BUILD.md).
## Available packages
### Arch Linux
Bitwarden_rs is already packaged for Arch Linux thanks to @mqus. There is an [AUR package](https://aur.archlinux.org/packages/bitwarden_rs) (optionally with the [vault web interface](https://aur.archlinux.org/packages/bitwarden_rs-vault/)) available.
## Backing up your vault
### 1. the sqlite3 database
The sqlite3 database should be backed up using the proper sqlite3 backup command. This ensures the database does not become corrupted if the backup happens during a database write:
```
sqlite3 /$DATA_FOLDER/db.sqlite3 ".backup '/$DATA_FOLDER/db-backup/backup.sq3'"
```
This command can be run via a CRON job every day; however, note that it will overwrite the same backup.sq3 file each time. This backup file should therefore be saved via incremental backup, either using a CRON job command that appends a timestamp or from another backup app such as Duplicati.
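A sketch of such a timestamped variant via cron (the paths are assumptions; adjust them to where your data folder actually lives on the host):
```sh
# crontab entry: run daily at 03:00, writing a date-stamped backup file
# (note that % must be escaped inside a crontab)
0 3 * * * sqlite3 /bw-data/db.sqlite3 ".backup '/backups/db-$(date +\%Y\%m\%d).sq3'"
```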
### 2. the attachments folder
By default, this is located in `$DATA_FOLDER/attachments`.
### 3. the key files
This is optional; these files are only used to store tokens of users currently logged in, and deleting them would simply log each user out, forcing them to log in again. By default, these are located in `$DATA_FOLDER` (by default `/data` in the docker image). There are 3 files: `rsa_key.der`, `rsa_key.pem`, and `rsa_key.pub.der`.
### 4. Icon Cache
This is optional; the icon cache can re-download itself, however if you have a large cache, it may take a long time. By default it is located in `$DATA_FOLDER/icon_cache`.
## Running the server with non-root user
The root user inside the container is already pretty limited in what it can do, so the default setup should be secure enough. However, if you wish to go the extra mile and avoid using root even inside the container, here's how you can do that:
1. Create a data folder that's owned by a non-root user, so you can use that user to write the persistent data. Get the user's `id`. On Linux you can run `stat <folder_name>` to get/verify the owner ID (see the lookup sketch after this list).
2. When you run the container, you need to provide the user ID as one of the parameters. Note that this needs to be in numeric form, not the user name, because docker would try to find such a user defined inside the image, which most likely isn't there or would have a different ID than your local user and hence wouldn't be able to write the persistent data. This can be done with the `--user` parameter.
3. bitwarden_rs listens on port `80` inside the container by default, and this [won't work with a non-root user](https://www.w3.org/Daemon/User/Installation/PrivilegedPorts.html), because regular users aren't allowed to open ports below `1024`. To overcome this, you need to configure the server to listen on a different port; you can use `ROCKET_PORT` to do that.
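For step 1, a quick way to look up the numeric owner ID (the folder path and user name here are just examples):
```sh
# Print the numeric UID that owns the data folder (GNU stat)
stat -c '%u' /bw-data
# Or look up a known user name's UID directly
id -u bitwarden
```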
Here's a sample docker run that uses the user with id `1000` and has the port redirection configured, so that inside the container the service listens on port `8080` and docker translates that to the external (host) port `80`:
```sh
docker run -d --name bitwarden \
--user 1000 \
-e ROCKET_PORT=8080 \
-v /bw-data/:/data/ \
-p 80:8080 \
mprasil/bitwarden:latest
```
## How to compile bitwarden_rs
Install `rust nightly`; on Windows the recommended way is through `rustup`.
Install the `openssl` library; on Windows the best option is Microsoft's `vcpkg`, on other systems use their respective package managers.
Then run:
```sh
cargo run
# or
cargo build
```
## How to install the web-vault locally
If you're using the docker image, you can just update the `VAULT_VERSION` variable in the Dockerfile and rebuild the image.
To build the web-vault yourself instead, install `node.js` and either `yarn` or `npm` (usually included with node).
Clone the web-vault outside the project:
```
git clone https://github.com/bitwarden/web.git web-vault
```
Modify `web-vault/settings.Production.json` to look like this:
```json
{
    "appSettings": {
        "apiUri": "/api",
        "identityUri": "/identity",
        "iconsUri": "/icons",
        "stripeKey": "",
        "braintreeKey": ""
    }
}
```
Then, run the following from the `web-vault` dir:
```sh
# With yarn (recommended)
yarn
yarn gulp dist:selfHosted

# With npm
npm install
npx gulp dist:selfHosted
```
Finally, copy the contents of the `web-vault/dist` folder into the `bitwarden_rs/web-vault` folder.
## How to recreate database schemas
Install diesel-cli with cargo:
```sh
cargo install diesel_cli --no-default-features --features sqlite-bundled # Or use only sqlite to use the system version
```
Make sure that the correct path to the database is in the `.env` file.
If you want to modify the schemas, create a new migration with:
```
diesel migration generate <name>
```
Modify the *.sql files, making sure that any changes are reverted in the down.sql file.
Apply the migrations and save the generated schemas as follows:
```
diesel migration redo
diesel print-schema > src/db/schema.rs
```
## Differences from upstream API implementation
### Changing user email
Because we don't have any SMTP functionality at the moment, there's no way to deliver the verification token when you try to change the email. The user just needs to enter any random token to continue, and the change will be applied.
### Creating organization
We use the upstream Vault interface directly, without any (significant) changes; this is why the user is presented with paid options when creating an organization. To create an organization, just use the free option; none of the limits apply when using bitwarden_rs as the back-end API, and after the organization is created it should behave like an Enterprise organization.
### Inviting users into organization
The invited users won't get an invitation email; instead, all already registered users will appear in the interface as if they had already accepted the invitation. The organization admin then just needs to confirm them to make them proper Organization members and give them access to the shared secrets.
Invited users that aren't registered yet will show up in the Organization admin interface as "Invited". At the same time, an invitation record is created that allows the users to register even if [user registration is disabled](#disable-registration-of-new-users) (unless you [disable this functionality](#disable-invitations)). They will automatically become "Accepted" once they register. From there the Organization admin can confirm them to give them access to the Organization.
### Running on unencrypted connection
It is strongly recommended to run the bitwarden_rs service over HTTPS. However, the server itself, while [supporting it](#enabling-https), does not strictly require such a setup. This makes it a bit easier to spin up the service in cases where you can generally trust the connection (internal and secure network, access over VPN, ...) or when you want to put the service behind an HTTP proxy that will do the encryption on the proxy end.
Running over HTTP is still reasonably secure, provided you use a really strong master password and avoid using the web Vault over a connection that is vulnerable to MITM attacks, where an attacker could inject javascript into your interface. However, some forms of 2FA might not work in this setup, and the [Vault doesn't work in this configuration in Chrome](https://github.com/bitwarden/web/issues/254).
## Get in touch
To ask a question, [raising an issue](https://github.com/dani-garcia/bitwarden_rs/issues/new) is fine; please also report any bugs you spot there.
If you prefer to chat, we're usually hanging around in the [#bitwarden_rs:matrix.org](https://matrix.to/#/#bitwarden_rs:matrix.org) room on Matrix. Feel free to join us!

Rocket.toml Normal file

@@ -0,0 +1,2 @@
[global.limits]
json = 10485760 # 10 MiB

diesel.toml Normal file

@@ -0,0 +1,5 @@
# For documentation on how to configure this file,
# see diesel.rs/guides/configuring-diesel-cli
[print_schema]
file = "src/db/schema.rs"


@@ -0,0 +1,28 @@
--- a/src/app/services/services.module.ts
+++ b/src/app/services/services.module.ts
@@ -120,20 +120,17 @@ const notificationsService = new NotificationsService(userService, syncService,
const environmentService = new EnvironmentService(apiService, storageService, notificationsService);
const auditService = new AuditService(cryptoFunctionService, apiService);
-const analytics = new Analytics(window, () => platformUtilsService.isDev() || platformUtilsService.isSelfHost(),
+const analytics = new Analytics(window, () => platformUtilsService.isDev() || platformUtilsService.isSelfHost() || true,
platformUtilsService, storageService, appIdService);
containerService.attachToWindow(window);
export function initFactory(): Function {
return async () => {
await (storageService as HtmlStorageService).init();
- const isDev = platformUtilsService.isDev();
- if (!isDev && platformUtilsService.isSelfHost()) {
- environmentService.baseUrl = window.location.origin;
- } else {
- environmentService.notificationsUrl = isDev ? 'http://localhost:61840' :
- 'https://notifications.bitwarden.com'; // window.location.origin + '/notifications';
- }
+ const isDev = false;
+ environmentService.baseUrl = window.location.origin;
+ environmentService.notificationsUrl = window.location.origin + '/notifications';
+
await apiService.setUrls({
base: isDev ? null : window.location.origin,
api: isDev ? 'http://localhost:4000' : null,


@@ -1,9 +0,0 @@
{
"appSettings": {
"apiUri": "/api",
"identityUri": "/identity",
"iconsUri": "/icons",
"stripeKey": "",
"braintreeKey": ""
}
}


@@ -0,0 +1,8 @@
UPDATE users
SET totp_secret = (
SELECT twofactor.data FROM twofactor
WHERE twofactor.type = 0
AND twofactor.user_uuid = users.uuid
);
DROP TABLE twofactor;


@@ -0,0 +1,15 @@
CREATE TABLE twofactor (
uuid TEXT NOT NULL PRIMARY KEY,
user_uuid TEXT NOT NULL REFERENCES users (uuid),
type INTEGER NOT NULL,
enabled BOOLEAN NOT NULL,
data TEXT NOT NULL,
UNIQUE (user_uuid, type)
);
INSERT INTO twofactor (uuid, user_uuid, type, enabled, data)
SELECT lower(hex(randomblob(16))) , uuid, 0, 1, u.totp_secret FROM users u where u.totp_secret IS NOT NULL;
UPDATE users SET totp_secret = NULL; -- Instead of recreating the table, just leave the columns empty


@@ -0,0 +1,3 @@
ALTER TABLE ciphers
ADD COLUMN
password_history TEXT;


@@ -0,0 +1 @@
DROP TABLE invitations;


@@ -0,0 +1,3 @@
CREATE TABLE invitations (
email TEXT NOT NULL PRIMARY KEY
);


@@ -0,0 +1,7 @@
ALTER TABLE users
ADD COLUMN
client_kdf_type INTEGER NOT NULL DEFAULT 0; -- PBKDF2
ALTER TABLE users
ADD COLUMN
client_kdf_iter INTEGER NOT NULL DEFAULT 5000;

rust-toolchain Normal file

@@ -0,0 +1 @@
nightly-2018-09-12


@@ -3,10 +3,10 @@ use rocket_contrib::Json;
use db::DbConn;
use db::models::*;
use api::{PasswordData, JsonResult, EmptyResult, JsonUpcase};
use api::{PasswordData, JsonResult, EmptyResult, JsonUpcase, NumberOrString};
use auth::Headers;
use util;
use fast_chemail::is_valid_email;
use mail;
use CONFIG;
@@ -14,8 +14,9 @@ use CONFIG;
#[allow(non_snake_case)]
struct RegisterData {
Email: String,
Kdf: Option<i32>,
KdfIterations: Option<i32>,
Key: String,
#[serde(deserialize_with = "util::upcase_deserialize")]
Keys: Option<KeysData>,
MasterPasswordHash: String,
MasterPasswordHint: Option<String>,
@@ -33,15 +34,40 @@ struct KeysData {
fn register(data: JsonUpcase<RegisterData>, conn: DbConn) -> EmptyResult {
let data: RegisterData = data.into_inner().data;
if !CONFIG.signups_allowed {
err!(format!("Signups not allowed"))
let mut user = match User::find_by_mail(&data.Email, &conn) {
Some(mut user) => {
if Invitation::take(&data.Email, &conn) {
for mut user_org in UserOrganization::find_invited_by_user(&user.uuid, &conn).iter_mut() {
user_org.status = UserOrgStatus::Accepted as i32;
user_org.save(&conn);
};
user
} else if CONFIG.signups_allowed {
err!("Account with this email already exists")
} else {
err!("Registration not allowed")
}
},
None => {
if CONFIG.signups_allowed || Invitation::take(&data.Email, &conn) {
User::new(data.Email)
} else {
err!("Registration not allowed")
}
}
};
if let Some(client_kdf_iter) = data.KdfIterations {
user.client_kdf_iter = client_kdf_iter;
}
if let Some(_) = User::find_by_mail(&data.Email, &conn) {
err!("Email already exists")
if let Some(client_kdf_type) = data.Kdf {
user.client_kdf_type = client_kdf_type;
}
let mut user = User::new(data.Email, data.Key, data.MasterPasswordHash);
user.set_password(&data.MasterPasswordHash);
user.key = data.Key;
// Add extra fields if present
if let Some(name) = data.Name {
@@ -67,6 +93,36 @@ fn profile(headers: Headers, conn: DbConn) -> JsonResult {
Ok(Json(headers.user.to_json(&conn)))
}
#[derive(Deserialize, Debug)]
#[allow(non_snake_case)]
struct ProfileData {
#[serde(rename = "Culture")]
_Culture: String, // Ignored, always use en-US
MasterPasswordHint: Option<String>,
Name: String,
}
#[put("/accounts/profile", data = "<data>")]
fn put_profile(data: JsonUpcase<ProfileData>, headers: Headers, conn: DbConn) -> JsonResult {
post_profile(data, headers, conn)
}
#[post("/accounts/profile", data = "<data>")]
fn post_profile(data: JsonUpcase<ProfileData>, headers: Headers, conn: DbConn) -> JsonResult {
let data: ProfileData = data.into_inner().data;
let mut user = headers.user;
user.name = data.Name;
user.password_hint = match data.MasterPasswordHint {
Some(ref h) if h.is_empty() => None,
_ => data.MasterPasswordHint,
};
user.save(&conn);
Ok(Json(user.to_json(&conn)))
}
#[get("/users/<uuid>/public-key")]
fn get_public_keys(uuid: String, _headers: Headers, conn: DbConn) -> JsonResult {
let user = match User::find_by_uuid(&uuid, &conn) {
@@ -119,6 +175,35 @@ fn post_password(data: JsonUpcase<ChangePassData>, headers: Headers, conn: DbCon
Ok(())
}
#[derive(Deserialize)]
#[allow(non_snake_case)]
struct ChangeKdfData {
Kdf: i32,
KdfIterations: i32,
MasterPasswordHash: String,
NewMasterPasswordHash: String,
Key: String,
}
#[post("/accounts/kdf", data = "<data>")]
fn post_kdf(data: JsonUpcase<ChangeKdfData>, headers: Headers, conn: DbConn) -> EmptyResult {
let data: ChangeKdfData = data.into_inner().data;
let mut user = headers.user;
if !user.check_valid_password(&data.MasterPasswordHash) {
err!("Invalid password")
}
user.client_kdf_iter = data.KdfIterations;
user.client_kdf_type = data.Kdf;
user.set_password(&data.NewMasterPasswordHash);
user.key = data.Key;
user.save(&conn);
Ok(())
}
#[post("/accounts/security-stamp", data = "<data>")]
fn post_sstamp(data: JsonUpcase<PasswordData>, headers: Headers, conn: DbConn) -> EmptyResult {
let data: PasswordData = data.into_inner().data;
@@ -136,13 +221,39 @@ fn post_sstamp(data: JsonUpcase<PasswordData>, headers: Headers, conn: DbConn) -
#[derive(Deserialize)]
#[allow(non_snake_case)]
struct ChangeEmailData {
struct EmailTokenData {
MasterPasswordHash: String,
NewEmail: String,
}
#[post("/accounts/email-token", data = "<data>")]
fn post_email_token(data: JsonUpcase<EmailTokenData>, headers: Headers, conn: DbConn) -> EmptyResult {
let data: EmailTokenData = data.into_inner().data;
if !headers.user.check_valid_password(&data.MasterPasswordHash) {
err!("Invalid password")
}
if User::find_by_mail(&data.NewEmail, &conn).is_some() {
err!("Email already in use");
}
Ok(())
}
#[derive(Deserialize)]
#[allow(non_snake_case)]
struct ChangeEmailData {
MasterPasswordHash: String,
NewEmail: String,
Key: String,
NewMasterPasswordHash: String,
#[serde(rename = "Token")]
_Token: NumberOrString,
}
#[post("/accounts/email", data = "<data>")]
fn post_email(data: JsonUpcase<ChangeEmailData>, headers: Headers, conn: DbConn) -> EmptyResult {
let data: ChangeEmailData = data.into_inner().data;
let mut user = headers.user;
@@ -156,12 +267,21 @@ fn post_email(data: JsonUpcase<ChangeEmailData>, headers: Headers, conn: DbConn)
}
user.email = data.NewEmail;
user.set_password(&data.NewMasterPasswordHash);
user.key = data.Key;
user.save(&conn);
Ok(())
}
#[post("/accounts/delete", data = "<data>")]
fn post_delete_account(data: JsonUpcase<PasswordData>, headers: Headers, conn: DbConn) -> EmptyResult {
delete_account(data, headers, conn)
}
#[delete("/accounts", data = "<data>")]
fn delete_account(data: JsonUpcase<PasswordData>, headers: Headers, conn: DbConn) -> EmptyResult {
let data: PasswordData = data.into_inner().data;
let user = headers.user;
@@ -172,17 +292,15 @@ fn delete_account(data: JsonUpcase<PasswordData>, headers: Headers, conn: DbConn
// Delete ciphers and their attachments
for cipher in Cipher::find_owned_by_user(&user.uuid, &conn) {
match cipher.delete(&conn) {
Ok(()) => (),
Err(_) => err!("Failed deleting cipher")
if cipher.delete(&conn).is_err() {
err!("Failed deleting cipher")
}
}
// Delete folders
for f in Folder::find_by_user(&user.uuid, &conn) {
match f.delete(&conn) {
Ok(()) => (),
Err(_) => err!("Failed deleting folder")
if f.delete(&conn).is_err() {
err!("Failed deleting folder")
}
}
@@ -197,6 +315,62 @@ fn delete_account(data: JsonUpcase<PasswordData>, headers: Headers, conn: DbConn
#[get("/accounts/revision-date")]
fn revision_date(headers: Headers) -> String {
let revision_date = headers.user.updated_at.timestamp();
let revision_date = headers.user.updated_at.timestamp_millis();
revision_date.to_string()
}
#[derive(Deserialize)]
#[allow(non_snake_case)]
struct PasswordHintData {
Email: String,
}
#[post("/accounts/password-hint", data = "<data>")]
fn password_hint(data: JsonUpcase<PasswordHintData>, conn: DbConn) -> EmptyResult {
let data: PasswordHintData = data.into_inner().data;
if !is_valid_email(&data.Email) {
err!("This email address is not valid...");
}
let user = User::find_by_mail(&data.Email, &conn);
if user.is_none() {
return Ok(());
}
let user = user.unwrap();
if let Some(ref mail_config) = CONFIG.mail {
if let Err(e) = mail::send_password_hint(&user.email, user.password_hint, mail_config) {
err!(format!("There have been a problem sending the email: {}", e));
}
} else if CONFIG.show_password_hint {
if let Some(hint) = user.password_hint {
err!(format!("Your password hint is: {}", &hint));
} else {
err!("Sorry, you have no password hint...");
}
}
Ok(())
}
#[derive(Deserialize)]
#[allow(non_snake_case)]
struct PreloginData {
Email: String,
}
#[post("/accounts/prelogin", data = "<data>")]
fn prelogin(data: JsonUpcase<PreloginData>, conn: DbConn) -> JsonResult {
let data: PreloginData = data.into_inner().data;
let (kdf_type, kdf_iter) = match User::find_by_mail(&data.Email, &conn) {
Some(user) => (user.client_kdf_type, user.client_kdf_iter),
None => (User::CLIENT_KDF_TYPE_DEFAULT, User::CLIENT_KDF_ITER_DEFAULT),
};
Ok(Json(json!({
"Kdf": kdf_type,
"KdfIterations": kdf_iter
})))
}


@@ -1,6 +1,7 @@
use std::path::Path;
use std::collections::HashSet;
use rocket::State;
use rocket::Data;
use rocket::http::ContentType;
@@ -14,10 +15,9 @@ use data_encoding::HEXLOWER;
use db::DbConn;
use db::models::*;
use util;
use crypto;
use api::{self, PasswordData, JsonResult, EmptyResult, JsonUpcase};
use api::{self, PasswordData, JsonResult, EmptyResult, JsonUpcase, WebSocketUsers, UpdateType};
use auth::Headers;
use CONFIG;
@@ -87,7 +87,9 @@ fn get_cipher_details(uuid: String, headers: Headers, conn: DbConn) -> JsonResul
#[derive(Deserialize, Debug)]
#[allow(non_snake_case)]
struct CipherData {
pub struct CipherData {
// Id is optional as it is included only in bulk share
Id: Option<String>,
// Folder id is not included in import
FolderId: Option<String>,
// TODO: Some of these might appear all the time, no need for Option
@@ -99,8 +101,8 @@ struct CipherData {
Card = 3,
Identity = 4
*/
Type: i32, // TODO: Change this to NumberOrString
Name: String,
pub Type: i32, // TODO: Change this to NumberOrString
pub Name: String,
Notes: Option<String>,
Fields: Option<Value>,
@@ -111,39 +113,41 @@ struct CipherData {
Identity: Option<Value>,
Favorite: Option<bool>,
PasswordHistory: Option<Value>,
}
#[post("/ciphers/admin", data = "<data>")]
fn post_ciphers_admin(data: JsonUpcase<CipherData>, headers: Headers, conn: DbConn) -> JsonResult {
fn post_ciphers_admin(data: JsonUpcase<CipherData>, headers: Headers, conn: DbConn, ws: State<WebSocketUsers>) -> JsonResult {
// TODO: Implement this correctly
post_ciphers(data, headers, conn)
post_ciphers(data, headers, conn, ws)
}
#[post("/ciphers", data = "<data>")]
fn post_ciphers(data: JsonUpcase<CipherData>, headers: Headers, conn: DbConn) -> JsonResult {
fn post_ciphers(data: JsonUpcase<CipherData>, headers: Headers, conn: DbConn, ws: State<WebSocketUsers>) -> JsonResult {
let data: CipherData = data.into_inner().data;
let mut cipher = Cipher::new(data.Type, data.Name.clone());
update_cipher_from_data(&mut cipher, data, &headers, true, &conn)?;
update_cipher_from_data(&mut cipher, data, &headers, false, &conn, &ws, UpdateType::SyncCipherCreate)?;
Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, &conn)))
}
fn update_cipher_from_data(cipher: &mut Cipher, data: CipherData, headers: &Headers, is_new_or_shared: bool, conn: &DbConn) -> EmptyResult {
if is_new_or_shared {
if let Some(org_id) = data.OrganizationId {
match UserOrganization::find_by_user_and_org(&headers.user.uuid, &org_id, &conn) {
None => err!("You don't have permission to add item to organization"),
Some(org_user) => if org_user.has_full_access() {
cipher.organization_uuid = Some(org_id);
cipher.user_uuid = None;
} else {
err!("You don't have permission to add cipher directly to organization")
}
pub fn update_cipher_from_data(cipher: &mut Cipher, data: CipherData, headers: &Headers, shared_to_collection: bool, conn: &DbConn, ws: &State<WebSocketUsers>, ut: UpdateType) -> EmptyResult {
if let Some(org_id) = data.OrganizationId {
match UserOrganization::find_by_user_and_org(&headers.user.uuid, &org_id, &conn) {
None => err!("You don't have permission to add item to organization"),
Some(org_user) => if shared_to_collection
|| org_user.has_full_access()
|| cipher.is_write_accessible_to_user(&headers.user.uuid, &conn) {
cipher.organization_uuid = Some(org_id);
cipher.user_uuid = None;
} else {
err!("You don't have permission to add cipher directly to organization")
}
} else {
cipher.user_uuid = Some(headers.user.uuid.clone());
}
} else {
cipher.user_uuid = Some(headers.user.uuid.clone());
}
if let Some(ref folder_id) = data.FolderId {
@@ -157,24 +161,6 @@ fn update_cipher_from_data(cipher: &mut Cipher, data: CipherData, headers: &Head
}
}
let uppercase_fields = data.Fields.map(|f| {
let mut value = json!({});
// Copy every field object and change the names to the correct case
copy_values(&f, &mut value);
value
});
// TODO: ******* Backwards compat start **********
// To remove backwards compatibility, just create an empty values object,
// and remove the compat code from cipher::to_json
let mut values = json!({
"Name": data.Name,
"Notes": data.Notes
});
values["Fields"] = uppercase_fields.clone().unwrap_or(Value::Null);
// TODO: ******* Backwards compat end **********
let type_data_opt = match data.Type {
1 => data.Login,
2 => data.SecureNote,
@@ -183,21 +169,29 @@ fn update_cipher_from_data(cipher: &mut Cipher, data: CipherData, headers: &Head
_ => err!("Invalid type")
};
let type_data = match type_data_opt {
let mut type_data = match type_data_opt {
Some(data) => data,
None => err!("Data missing")
};
// Copy the type data and change the names to the correct case
copy_values(&type_data, &mut values);
// TODO: ******* Backwards compat start **********
// To remove backwards compatibility, just delete this code,
// and remove the compat code from cipher::to_json
type_data["Name"] = Value::String(data.Name.clone());
type_data["Notes"] = data.Notes.clone().map(Value::String).unwrap_or(Value::Null);
type_data["Fields"] = data.Fields.clone().unwrap_or(Value::Null);
type_data["PasswordHistory"] = data.PasswordHistory.clone().unwrap_or(Value::Null);
// TODO: ******* Backwards compat end **********
cipher.favorite = data.Favorite.unwrap_or(false);
cipher.name = data.Name;
cipher.notes = data.Notes;
cipher.fields = uppercase_fields.map(|f| f.to_string());
cipher.data = values.to_string();
cipher.fields = data.Fields.map(|f| f.to_string());
cipher.data = type_data.to_string();
cipher.password_history = data.PasswordHistory.map(|f| f.to_string());
cipher.save(&conn);
ws.send_cipher_update(ut, &cipher, &cipher.update_users_revision(&conn));
if cipher.move_to_folder(data.FolderId, &headers.user.uuid, &conn).is_err() {
err!("Error saving the folder information")
@@ -206,23 +200,6 @@ fn update_cipher_from_data(cipher: &mut Cipher, data: CipherData, headers: &Head
Ok(())
}
fn copy_values(from: &Value, to: &mut Value) {
if let Some(map) = from.as_object() {
for (key, val) in map {
copy_values(val, &mut to[util::upcase_first(key)]);
}
} else if let Some(array) = from.as_array() {
// Initialize array with null values
*to = json!(vec![Value::Null; array.len()]);
for (index, val) in array.iter().enumerate() {
copy_values(val, &mut to[index]);
}
} else {
*to = from.clone();
}
}
use super::folders::FolderData;
#[derive(Deserialize)]
@@ -237,14 +214,14 @@ struct ImportData {
#[allow(non_snake_case)]
struct RelationsData {
// Cipher id
key: u32,
Key: usize,
// Folder id
value: u32,
Value: usize,
}
#[post("/ciphers/import", data = "<data>")]
fn post_ciphers_import(data: JsonUpcase<ImportData>, headers: Headers, conn: DbConn) -> EmptyResult {
fn post_ciphers_import(data: JsonUpcase<ImportData>, headers: Headers, conn: DbConn, ws: State<WebSocketUsers>) -> EmptyResult {
let data: ImportData = data.into_inner().data;
// Read and create the folders
@@ -259,39 +236,45 @@ fn post_ciphers_import(data: JsonUpcase<ImportData>, headers: Headers, conn: DbC
let mut relations_map = HashMap::new();
for relation in data.FolderRelationships {
relations_map.insert(relation.key, relation.value);
relations_map.insert(relation.Key, relation.Value);
}
// Read and create the ciphers
let mut index = 0;
for cipher_data in data.Ciphers {
for (index, cipher_data) in data.Ciphers.into_iter().enumerate() {
let folder_uuid = relations_map.get(&index)
.map(|i| folders[*i as usize].uuid.clone());
.map(|i| folders[*i].uuid.clone());
let mut cipher = Cipher::new(cipher_data.Type, cipher_data.Name.clone());
update_cipher_from_data(&mut cipher, cipher_data, &headers, true, &conn)?;
update_cipher_from_data(&mut cipher, cipher_data, &headers, false, &conn, &ws, UpdateType::SyncCipherCreate)?;
cipher.move_to_folder(folder_uuid, &headers.user.uuid.clone(), &conn).ok();
index += 1;
}
Ok(())
let mut user = headers.user;
match user.update_revision(&conn) {
Ok(()) => Ok(()),
Err(_) => err!("Failed to update the revision, please log out and log back in to finish import.")
}
}
#[put("/ciphers/<uuid>/admin", data = "<data>")]
fn put_cipher_admin(uuid: String, data: JsonUpcase<CipherData>, headers: Headers, conn: DbConn, ws: State<WebSocketUsers>) -> JsonResult {
put_cipher(uuid, data, headers, conn, ws)
}
#[post("/ciphers/<uuid>/admin", data = "<data>")]
fn post_cipher_admin(uuid: String, data: JsonUpcase<CipherData>, headers: Headers, conn: DbConn) -> JsonResult {
// TODO: Implement this correctly
post_cipher(uuid, data, headers, conn)
fn post_cipher_admin(uuid: String, data: JsonUpcase<CipherData>, headers: Headers, conn: DbConn, ws: State<WebSocketUsers>) -> JsonResult {
post_cipher(uuid, data, headers, conn, ws)
}
#[post("/ciphers/<uuid>", data = "<data>")]
fn post_cipher(uuid: String, data: JsonUpcase<CipherData>, headers: Headers, conn: DbConn) -> JsonResult {
put_cipher(uuid, data, headers, conn)
fn post_cipher(uuid: String, data: JsonUpcase<CipherData>, headers: Headers, conn: DbConn, ws: State<WebSocketUsers>) -> JsonResult {
put_cipher(uuid, data, headers, conn, ws)
}
#[put("/ciphers/<uuid>", data = "<data>")]
fn put_cipher(uuid: String, data: JsonUpcase<CipherData>, headers: Headers, conn: DbConn) -> JsonResult {
fn put_cipher(uuid: String, data: JsonUpcase<CipherData>, headers: Headers, conn: DbConn, ws: State<WebSocketUsers>) -> JsonResult {
let data: CipherData = data.into_inner().data;
let mut cipher = match Cipher::find_by_uuid(&uuid, &conn) {
@@ -303,7 +286,7 @@ fn put_cipher(uuid: String, data: JsonUpcase<CipherData>, headers: Headers, conn
err!("Cipher is not write accessible")
}
update_cipher_from_data(&mut cipher, data, &headers, false, &conn)?;
update_cipher_from_data(&mut cipher, data, &headers, false, &conn, &ws, UpdateType::SyncCipherUpdate)?;
Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, &conn)))
}
@@ -319,6 +302,11 @@ fn post_collections_update(uuid: String, data: JsonUpcase<CollectionsAdminData>,
post_collections_admin(uuid, data, headers, conn)
}
#[put("/ciphers/<uuid>/collections-admin", data = "<data>")]
fn put_collections_admin(uuid: String, data: JsonUpcase<CollectionsAdminData>, headers: Headers, conn: DbConn) -> EmptyResult {
post_collections_admin(uuid, data, headers, conn)
}
#[post("/ciphers/<uuid>/collections-admin", data = "<data>")]
fn post_collections_admin(uuid: String, data: JsonUpcase<CollectionsAdminData>, headers: Headers, conn: DbConn) -> EmptyResult {
let data: CollectionsAdminData = data.into_inner().data;
@@ -358,15 +346,74 @@ fn post_collections_admin(uuid: String, data: JsonUpcase<CollectionsAdminData>,
#[derive(Deserialize)]
#[allow(non_snake_case)]
struct ShareCipherData {
#[serde(deserialize_with = "util::upcase_deserialize")]
Cipher: CipherData,
CollectionIds: Vec<String>,
}
#[post("/ciphers/<uuid>/share", data = "<data>")]
fn post_cipher_share(uuid: String, data: JsonUpcase<ShareCipherData>, headers: Headers, conn: DbConn) -> JsonResult {
fn post_cipher_share(uuid: String, data: JsonUpcase<ShareCipherData>, headers: Headers, conn: DbConn, ws: State<WebSocketUsers>) -> JsonResult {
let data: ShareCipherData = data.into_inner().data;
share_cipher_by_uuid(&uuid, data, &headers, &conn, &ws)
}
#[put("/ciphers/<uuid>/share", data = "<data>")]
fn put_cipher_share(uuid: String, data: JsonUpcase<ShareCipherData>, headers: Headers, conn: DbConn, ws: State<WebSocketUsers>) -> JsonResult {
let data: ShareCipherData = data.into_inner().data;
share_cipher_by_uuid(&uuid, data, &headers, &conn, &ws)
}
#[derive(Deserialize)]
#[allow(non_snake_case)]
struct ShareSelectedCipherData {
Ciphers: Vec<CipherData>,
CollectionIds: Vec<String>
}
#[put("/ciphers/share", data = "<data>")]
fn put_cipher_share_seleted(data: JsonUpcase<ShareSelectedCipherData>, headers: Headers, conn: DbConn, ws: State<WebSocketUsers>) -> EmptyResult {
let mut data: ShareSelectedCipherData = data.into_inner().data;
let mut cipher_ids: Vec<String> = Vec::new();
if data.Ciphers.is_empty() {
err!("You must select at least one cipher.")
}
if data.CollectionIds.is_empty() {
err!("You must select at least one collection.")
}
for cipher in data.Ciphers.iter() {
match cipher.Id {
Some(ref id) => cipher_ids.push(id.to_string()),
None => err!("Request missing ids field")
};
}
let attachments = Attachment::find_by_ciphers(cipher_ids, &conn);
if !attachments.is_empty() {
err!("Ciphers should not have any attachments.")
}
while let Some(cipher) = data.Ciphers.pop() {
let mut shared_cipher_data = ShareCipherData {
Cipher: cipher,
CollectionIds: data.CollectionIds.clone()
};
match shared_cipher_data.Cipher.Id.take() {
Some(id) => share_cipher_by_uuid(&id, shared_cipher_data , &headers, &conn, &ws)?,
None => err!("Request missing ids field")
};
}
Ok(())
}
fn share_cipher_by_uuid(uuid: &str, data: ShareCipherData, headers: &Headers, conn: &DbConn, ws: &State<WebSocketUsers>) -> JsonResult {
let mut cipher = match Cipher::find_by_uuid(&uuid, &conn) {
Some(cipher) => {
if cipher.is_write_accessible_to_user(&headers.user.uuid, &conn) {
@@ -378,22 +425,28 @@ fn post_cipher_share(uuid: String, data: JsonUpcase<ShareCipherData>, headers: H
None => err!("Cipher doesn't exist")
};
match data.Cipher.OrganizationId {
match data.Cipher.OrganizationId.clone() {
None => err!("Organization id not provided"),
Some(_) => {
update_cipher_from_data(&mut cipher, data.Cipher, &headers, true, &conn)?;
for collection in data.CollectionIds.iter() {
match Collection::find_by_uuid(&collection, &conn) {
Some(organization_uuid) => {
let mut shared_to_collection = false;
for uuid in &data.CollectionIds {
match Collection::find_by_uuid(uuid, &conn) {
None => err!("Invalid collection ID provided"),
Some(collection) => {
if collection.is_writable_by_user(&headers.user.uuid, &conn) {
CollectionCipher::save(&cipher.uuid.clone(), &collection.uuid, &conn);
if collection.org_uuid == organization_uuid {
CollectionCipher::save(&cipher.uuid.clone(), &collection.uuid, &conn);
shared_to_collection = true;
} else {
err!("Collection does not belong to organization")
}
} else {
err!("No rights to modify the collection")
}
}
}
}
update_cipher_from_data(&mut cipher, data.Cipher, &headers, shared_to_collection, &conn, &ws, UpdateType::SyncCipherUpdate)?;
Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, &conn)))
}
@@ -418,7 +471,8 @@ fn post_attachment(uuid: String, data: Data, content_type: &ContentType, headers
let base_path = Path::new(&CONFIG.attachments_folder).join(&cipher.uuid);
Multipart::with_body(data.open(), boundary).foreach_entry(|mut field| {
let name = field.headers.filename.unwrap(); // This is provided by the client, don't trust it
// This is provided by the client, don't trust it
let name = field.headers.filename.expect("No filename provided");
let file_name = HEXLOWER.encode(&crypto::get_random(vec![0; 10]));
let path = base_path.join(&file_name);
@@ -428,73 +482,95 @@ fn post_attachment(uuid: String, data: Data, content_type: &ContentType, headers
.size_limit(None)
.with_path(path) {
SaveResult::Full(SavedData::File(_, size)) => size as i32,
_ => return
SaveResult::Full(other) => {
println!("Attachment is not a file: {:?}", other);
return;
},
SaveResult::Partial(_, reason) => {
println!("Partial result: {:?}", reason);
return;
},
SaveResult::Error(e) => {
println!("Error: {:?}", e);
return;
}
};
let attachment = Attachment::new(file_name, cipher.uuid.clone(), name, size);
println!("Attachment: {:#?}", attachment);
attachment.save(&conn);
match attachment.save(&conn) {
Ok(()) => (),
Err(_) => println!("Error: failed to save attachment")
};
}).expect("Error processing multipart data");
Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, &conn)))
}
#[post("/ciphers/<uuid>/attachment-admin", format = "multipart/form-data", data = "<data>")]
fn post_attachment_admin(uuid: String, data: Data, content_type: &ContentType, headers: Headers, conn: DbConn) -> JsonResult {
post_attachment(uuid, data, content_type, headers, conn)
}
#[post("/ciphers/<uuid>/attachment/<attachment_id>/share", format = "multipart/form-data", data = "<data>")]
fn post_attachment_share(uuid: String, attachment_id: String, data: Data, content_type: &ContentType, headers: Headers, conn: DbConn, ws: State<WebSocketUsers>) -> JsonResult {
_delete_cipher_attachment_by_id(&uuid, &attachment_id, &headers, &conn, &ws)?;
post_attachment(uuid, data, content_type, headers, conn)
}
#[post("/ciphers/<uuid>/attachment/<attachment_id>/delete-admin")]
fn delete_attachment_post_admin(uuid: String, attachment_id: String, headers: Headers, conn: DbConn, ws: State<WebSocketUsers>) -> EmptyResult {
delete_attachment(uuid, attachment_id, headers, conn, ws)
}
#[post("/ciphers/<uuid>/attachment/<attachment_id>/delete")]
fn delete_attachment_post(uuid: String, attachment_id: String, headers: Headers, conn: DbConn) -> EmptyResult {
delete_attachment(uuid, attachment_id, headers, conn)
fn delete_attachment_post(uuid: String, attachment_id: String, headers: Headers, conn: DbConn, ws: State<WebSocketUsers>) -> EmptyResult {
delete_attachment(uuid, attachment_id, headers, conn, ws)
}
#[delete("/ciphers/<uuid>/attachment/<attachment_id>")]
fn delete_attachment(uuid: String, attachment_id: String, headers: Headers, conn: DbConn) -> EmptyResult {
let attachment = match Attachment::find_by_id(&attachment_id, &conn) {
Some(attachment) => attachment,
None => err!("Attachment doesn't exist")
};
fn delete_attachment(uuid: String, attachment_id: String, headers: Headers, conn: DbConn, ws: State<WebSocketUsers>) -> EmptyResult {
_delete_cipher_attachment_by_id(&uuid, &attachment_id, &headers, &conn, &ws)
}
if attachment.cipher_uuid != uuid {
err!("Attachment from other cipher")
}
let cipher = match Cipher::find_by_uuid(&uuid, &conn) {
Some(cipher) => cipher,
None => err!("Cipher doesn't exist")
};
if !cipher.is_write_accessible_to_user(&headers.user.uuid, &conn) {
err!("Cipher cannot be deleted by user")
}
// Delete attachment
match attachment.delete(&conn) {
Ok(()) => Ok(()),
Err(_) => err!("Deleting attachement failed")
}
#[delete("/ciphers/<uuid>/attachment/<attachment_id>/admin")]
fn delete_attachment_admin(uuid: String, attachment_id: String, headers: Headers, conn: DbConn, ws: State<WebSocketUsers>) -> EmptyResult {
_delete_cipher_attachment_by_id(&uuid, &attachment_id, &headers, &conn, &ws)
}
#[post("/ciphers/<uuid>/delete")]
fn delete_cipher_post(uuid: String, headers: Headers, conn: DbConn) -> EmptyResult {
_delete_cipher_by_uuid(&uuid, &headers, &conn)
fn delete_cipher_post(uuid: String, headers: Headers, conn: DbConn, ws: State<WebSocketUsers>) -> EmptyResult {
_delete_cipher_by_uuid(&uuid, &headers, &conn, &ws)
}
#[post("/ciphers/<uuid>/delete-admin")]
fn delete_cipher_post_admin(uuid: String, headers: Headers, conn: DbConn, ws: State<WebSocketUsers>) -> EmptyResult {
_delete_cipher_by_uuid(&uuid, &headers, &conn, &ws)
}
#[delete("/ciphers/<uuid>")]
fn delete_cipher(uuid: String, headers: Headers, conn: DbConn) -> EmptyResult {
_delete_cipher_by_uuid(&uuid, &headers, &conn)
fn delete_cipher(uuid: String, headers: Headers, conn: DbConn, ws: State<WebSocketUsers>) -> EmptyResult {
_delete_cipher_by_uuid(&uuid, &headers, &conn, &ws)
}
#[post("/ciphers/delete", data = "<data>")]
fn delete_cipher_selected(data: JsonUpcase<Value>, headers: Headers, conn: DbConn) -> EmptyResult {
#[delete("/ciphers/<uuid>/admin")]
fn delete_cipher_admin(uuid: String, headers: Headers, conn: DbConn, ws: State<WebSocketUsers>) -> EmptyResult {
_delete_cipher_by_uuid(&uuid, &headers, &conn, &ws)
}
#[delete("/ciphers", data = "<data>")]
fn delete_cipher_selected(data: JsonUpcase<Value>, headers: Headers, conn: DbConn, ws: State<WebSocketUsers>) -> EmptyResult {
let data: Value = data.into_inner().data;
let uuids = match data.get("Ids") {
Some(ids) => match ids.as_array() {
Some(ids) => ids.iter().filter_map(|uuid| { uuid.as_str() }),
Some(ids) => ids.iter().filter_map(Value::as_str),
None => err!("Posted ids field is not an array")
},
None => err!("Request missing ids field")
};
for uuid in uuids {
if let error @ Err(_) = _delete_cipher_by_uuid(uuid, &headers, &conn) {
if let error @ Err(_) = _delete_cipher_by_uuid(uuid, &headers, &conn, &ws) {
return error;
};
}
@@ -502,8 +578,13 @@ fn delete_cipher_selected(data: JsonUpcase<Value>, headers: Headers, conn: DbCon
Ok(())
}
#[post("/ciphers/delete", data = "<data>")]
fn delete_cipher_selected_post(data: JsonUpcase<Value>, headers: Headers, conn: DbConn, ws: State<WebSocketUsers>) -> EmptyResult {
delete_cipher_selected(data, headers, conn, ws)
}
#[post("/ciphers/move", data = "<data>")]
fn move_cipher_selected(data: JsonUpcase<Value>, headers: Headers, conn: DbConn) -> EmptyResult {
fn move_cipher_selected(data: JsonUpcase<Value>, headers: Headers, conn: DbConn, ws: State<WebSocketUsers>) -> EmptyResult {
let data = data.into_inner().data;
let folder_id = match data.get("FolderId") {
@@ -528,7 +609,7 @@ fn move_cipher_selected(data: JsonUpcase<Value>, headers: Headers, conn: DbConn)
let uuids = match data.get("Ids") {
Some(ids) => match ids.as_array() {
Some(ids) => ids.iter().filter_map(|uuid| { uuid.as_str() }),
Some(ids) => ids.iter().filter_map(Value::as_str),
None => err!("Posted ids field is not an array")
},
None => err!("Request missing ids field")
@@ -549,13 +630,19 @@ fn move_cipher_selected(data: JsonUpcase<Value>, headers: Headers, conn: DbConn)
err!("Error saving the folder information")
}
cipher.save(&conn);
ws.send_cipher_update(UpdateType::SyncCipherUpdate, &cipher, &cipher.update_users_revision(&conn));
}
Ok(())
}
#[put("/ciphers/move", data = "<data>")]
fn move_cipher_selected_put(data: JsonUpcase<Value>, headers: Headers, conn: DbConn, ws: State<WebSocketUsers>) -> EmptyResult {
move_cipher_selected(data, headers, conn, ws)
}
#[post("/ciphers/purge", data = "<data>")]
fn delete_all(data: JsonUpcase<PasswordData>, headers: Headers, conn: DbConn) -> EmptyResult {
fn delete_all(data: JsonUpcase<PasswordData>, headers: Headers, conn: DbConn, ws: State<WebSocketUsers>) -> EmptyResult {
let data: PasswordData = data.into_inner().data;
let password_hash = data.MasterPasswordHash;
@@ -567,25 +654,29 @@ fn delete_all(data: JsonUpcase<PasswordData>, headers: Headers, conn: DbConn) ->
// Delete ciphers and their attachments
for cipher in Cipher::find_owned_by_user(&user.uuid, &conn) {
match cipher.delete(&conn) {
Ok(()) => (),
Err(_) => err!("Failed deleting cipher")
if cipher.delete(&conn).is_err() {
err!("Failed deleting cipher")
}
else {
ws.send_cipher_update(UpdateType::SyncCipherDelete, &cipher, &cipher.update_users_revision(&conn));
}
}
// Delete folders
for f in Folder::find_by_user(&user.uuid, &conn) {
match f.delete(&conn) {
Ok(()) => (),
Err(_) => err!("Failed deleting folder")
}
if f.delete(&conn).is_err() {
err!("Failed deleting folder")
}
else {
ws.send_folder_update(UpdateType::SyncFolderCreate, &f);
}
}
Ok(())
}
fn _delete_cipher_by_uuid(uuid: &str, headers: &Headers, conn: &DbConn) -> EmptyResult {
let cipher = match Cipher::find_by_uuid(uuid, conn) {
fn _delete_cipher_by_uuid(uuid: &str, headers: &Headers, conn: &DbConn, ws: &State<WebSocketUsers>) -> EmptyResult {
let cipher = match Cipher::find_by_uuid(&uuid, &conn) {
Some(cipher) => cipher,
None => err!("Cipher doesn't exist"),
};
@@ -594,8 +685,40 @@ fn _delete_cipher_by_uuid(uuid: &str, headers: &Headers, conn: &DbConn) -> Empty
err!("Cipher can't be deleted by user")
}
match cipher.delete(conn) {
Ok(()) => Ok(()),
match cipher.delete(&conn) {
Ok(()) => {
ws.send_cipher_update(UpdateType::SyncCipherDelete, &cipher, &cipher.update_users_revision(&conn));
Ok(())
}
Err(_) => err!("Failed deleting cipher")
}
}
fn _delete_cipher_attachment_by_id(uuid: &str, attachment_id: &str, headers: &Headers, conn: &DbConn, ws: &State<WebSocketUsers>) -> EmptyResult {
let attachment = match Attachment::find_by_id(&attachment_id, &conn) {
Some(attachment) => attachment,
None => err!("Attachment doesn't exist")
};
if attachment.cipher_uuid != uuid {
err!("Attachment from other cipher")
}
let cipher = match Cipher::find_by_uuid(&uuid, &conn) {
Some(cipher) => cipher,
None => err!("Cipher doesn't exist")
};
if !cipher.is_write_accessible_to_user(&headers.user.uuid, &conn) {
err!("Cipher cannot be deleted by user")
}
// Delete attachment
match attachment.delete(&conn) {
Ok(()) => {
ws.send_cipher_update(UpdateType::SyncCipherDelete, &cipher, &cipher.update_users_revision(&conn));
Ok(())
}
Err(_) => err!("Deleting attachement failed")
}
}


@@ -1,9 +1,10 @@
use rocket::State;
use rocket_contrib::{Json, Value};
use db::DbConn;
use db::models::*;
use api::{JsonResult, EmptyResult, JsonUpcase};
use api::{JsonResult, EmptyResult, JsonUpcase, WebSocketUsers, UpdateType};
use auth::Headers;
#[get("/folders")]
@@ -40,23 +41,24 @@ pub struct FolderData {
}
#[post("/folders", data = "<data>")]
fn post_folders(data: JsonUpcase<FolderData>, headers: Headers, conn: DbConn) -> JsonResult {
fn post_folders(data: JsonUpcase<FolderData>, headers: Headers, conn: DbConn, ws: State<WebSocketUsers>) -> JsonResult {
let data: FolderData = data.into_inner().data;
let mut folder = Folder::new(headers.user.uuid.clone(), data.Name);
folder.save(&conn);
ws.send_folder_update(UpdateType::SyncFolderCreate, &folder);
Ok(Json(folder.to_json()))
}
#[post("/folders/<uuid>", data = "<data>")]
fn post_folder(uuid: String, data: JsonUpcase<FolderData>, headers: Headers, conn: DbConn) -> JsonResult {
put_folder(uuid, data, headers, conn)
fn post_folder(uuid: String, data: JsonUpcase<FolderData>, headers: Headers, conn: DbConn, ws: State<WebSocketUsers>) -> JsonResult {
put_folder(uuid, data, headers, conn, ws)
}
#[put("/folders/<uuid>", data = "<data>")]
fn put_folder(uuid: String, data: JsonUpcase<FolderData>, headers: Headers, conn: DbConn) -> JsonResult {
fn put_folder(uuid: String, data: JsonUpcase<FolderData>, headers: Headers, conn: DbConn, ws: State<WebSocketUsers>) -> JsonResult {
let data: FolderData = data.into_inner().data;
let mut folder = match Folder::find_by_uuid(&uuid, &conn) {
@@ -71,17 +73,18 @@ fn put_folder(uuid: String, data: JsonUpcase<FolderData>, headers: Headers, conn
folder.name = data.Name;
folder.save(&conn);
ws.send_folder_update(UpdateType::SyncFolderUpdate, &folder);
Ok(Json(folder.to_json()))
}
#[post("/folders/<uuid>/delete")]
fn delete_folder_post(uuid: String, headers: Headers, conn: DbConn) -> EmptyResult {
delete_folder(uuid, headers, conn)
fn delete_folder_post(uuid: String, headers: Headers, conn: DbConn, ws: State<WebSocketUsers>) -> EmptyResult {
delete_folder(uuid, headers, conn, ws)
}
#[delete("/folders/<uuid>")]
fn delete_folder(uuid: String, headers: Headers, conn: DbConn) -> EmptyResult {
fn delete_folder(uuid: String, headers: Headers, conn: DbConn, ws: State<WebSocketUsers>) -> EmptyResult {
let folder = match Folder::find_by_uuid(&uuid, &conn) {
Some(folder) => folder,
_ => err!("Invalid folder")
@@ -93,7 +96,10 @@ fn delete_folder(uuid: String, headers: Headers, conn: DbConn) -> EmptyResult {
// Delete the actual folder entry
match folder.delete(&conn) {
Ok(()) => Ok(()),
Ok(()) => {
ws.send_folder_update(UpdateType::SyncFolderDelete, &folder);
Ok(())
}
Err(_) => err!("Failed deleting folder")
}
}


@@ -646,5 +646,121 @@
"wiktionary.org"
],
"Excluded": false
},
{
"Type": 72,
"Domains": [
"airbnb.at",
"airbnb.be",
"airbnb.ca",
"airbnb.ch",
"airbnb.cl",
"airbnb.co.cr",
"airbnb.co.id",
"airbnb.co.in",
"airbnb.co.kr",
"airbnb.co.nz",
"airbnb.co.uk",
"airbnb.co.ve",
"airbnb.com",
"airbnb.com.ar",
"airbnb.com.au",
"airbnb.com.bo",
"airbnb.com.br",
"airbnb.com.bz",
"airbnb.com.co",
"airbnb.com.ec",
"airbnb.com.gt",
"airbnb.com.hk",
"airbnb.com.hn",
"airbnb.com.mt",
"airbnb.com.my",
"airbnb.com.ni",
"airbnb.com.pa",
"airbnb.com.pe",
"airbnb.com.py",
"airbnb.com.sg",
"airbnb.com.sv",
"airbnb.com.tr",
"airbnb.com.tw",
"airbnb.cz",
"airbnb.de",
"airbnb.dk",
"airbnb.es",
"airbnb.fi",
"airbnb.fr",
"airbnb.gr",
"airbnb.gy",
"airbnb.hu",
"airbnb.ie",
"airbnb.is",
"airbnb.it",
"airbnb.jp",
"airbnb.mx",
"airbnb.nl",
"airbnb.no",
"airbnb.pl",
"airbnb.pt",
"airbnb.ru",
"airbnb.se"
],
"Excluded": false
},
{
"Type": 73,
"Domains": [
"eventbrite.at",
"eventbrite.be",
"eventbrite.ca",
"eventbrite.ch",
"eventbrite.cl",
"eventbrite.co.id",
"eventbrite.co.in",
"eventbrite.co.kr",
"eventbrite.co.nz",
"eventbrite.co.uk",
"eventbrite.co.ve",
"eventbrite.com",
"eventbrite.com.au",
"eventbrite.com.bo",
"eventbrite.com.br",
"eventbrite.com.co",
"eventbrite.com.hk",
"eventbrite.com.hn",
"eventbrite.com.pe",
"eventbrite.com.sg",
"eventbrite.com.tr",
"eventbrite.com.tw",
"eventbrite.cz",
"eventbrite.de",
"eventbrite.dk",
"eventbrite.fi",
"eventbrite.fr",
"eventbrite.gy",
"eventbrite.hu",
"eventbrite.ie",
"eventbrite.is",
"eventbrite.it",
"eventbrite.jp",
"eventbrite.mx",
"eventbrite.nl",
"eventbrite.no",
"eventbrite.pl",
"eventbrite.pt",
"eventbrite.ru",
"eventbrite.se"
],
"Excluded": false
},
{
"Type": 74,
"Domains": [
"stackexchange.com",
"superuser.com",
"stackoverflow.com",
"serverfault.com",
"mathoverflow.net"
],
"Excluded": false
}
]


@@ -2,7 +2,7 @@ mod accounts;
mod ciphers;
mod folders;
mod organizations;
mod two_factor;
pub(crate) mod two_factor;
use self::accounts::*;
use self::ciphers::*;
@@ -14,13 +14,20 @@ pub fn routes() -> Vec<Route> {
routes![
register,
profile,
put_profile,
post_profile,
get_public_keys,
post_keys,
post_password,
post_kdf,
post_sstamp,
post_email_token,
post_email,
delete_account,
post_delete_account,
revision_date,
password_hint,
prelogin,
sync,
@@ -29,20 +36,31 @@ pub fn routes() -> Vec<Route> {
get_cipher_admin,
get_cipher_details,
post_ciphers,
put_cipher_admin,
post_ciphers_admin,
post_ciphers_import,
post_attachment,
post_attachment_admin,
post_attachment_share,
delete_attachment_post,
delete_attachment_post_admin,
delete_attachment,
delete_attachment_admin,
post_cipher_admin,
post_cipher_share,
put_cipher_share,
put_cipher_share_seleted,
post_cipher,
put_cipher,
delete_cipher_post,
delete_cipher_post_admin,
delete_cipher,
delete_cipher_admin,
delete_cipher_selected,
delete_cipher_selected_post,
delete_all,
move_cipher_selected,
move_cipher_selected_put,
get_folders,
get_folder,
@@ -55,31 +73,46 @@ pub fn routes() -> Vec<Route> {
get_twofactor,
get_recover,
recover,
disable_twofactor,
disable_twofactor_put,
generate_authenticator,
activate_authenticator,
disable_authenticator,
activate_authenticator_put,
generate_u2f,
activate_u2f,
activate_u2f_put,
get_organization,
create_organization,
delete_organization,
post_delete_organization,
leave_organization,
get_user_collections,
get_org_collections,
get_org_collection_detail,
get_collection_users,
put_organization,
post_organization,
post_organization_collections,
delete_organization_collection_user,
post_organization_collection_delete_user,
post_organization_collection_update,
put_organization_collection_update,
delete_organization_collection,
post_organization_collection_delete,
post_collections_update,
post_collections_admin,
put_collections_admin,
get_org_details,
get_org_users,
send_invite,
confirm_invite,
get_user,
edit_user,
put_organization_user,
delete_user,
post_delete_user,
post_org_import,
clear_device_token,
put_device_token,
@@ -106,8 +139,7 @@ use auth::Headers;
#[put("/devices/identifier/<uuid>/clear-token", data = "<data>")]
fn clear_device_token(uuid: String, data: Json<Value>, headers: Headers, conn: DbConn) -> EmptyResult {
println!("UUID: {:#?}", uuid);
println!("DATA: {:#?}", data);
let _data: Value = data.into_inner();
let device = match Device::find_by_uuid(&uuid, &conn) {
Some(device) => device,
@@ -125,9 +157,8 @@ fn clear_device_token(uuid: String, data: Json<Value>, headers: Headers, conn: D
#[put("/devices/identifier/<uuid>/token", data = "<data>")]
fn put_device_token(uuid: String, data: Json<Value>, headers: Headers, conn: DbConn) -> JsonResult {
println!("UUID: {:#?}", uuid);
println!("DATA: {:#?}", data);
let _data: Value = data.into_inner();
let device = match Device::find_by_uuid(&uuid, &conn) {
Some(device) => device,
None => err!("Device not found")
@@ -150,7 +181,7 @@ struct GlobalDomain {
Excluded: bool,
}
const GLOBAL_DOMAINS: &'static str = include_str!("global_domains.json");
const GLOBAL_DOMAINS: &str = include_str!("global_domains.json");
#[get("/settings/domains")]
fn get_eq_domains(headers: Headers) -> JsonResult {
@@ -185,8 +216,8 @@ struct EquivDomainData {
fn post_eq_domains(data: JsonUpcase<EquivDomainData>, headers: Headers, conn: DbConn) -> EmptyResult {
let data: EquivDomainData = data.into_inner().data;
let excluded_globals = data.ExcludedGlobalEquivalentDomains.unwrap_or(Vec::new());
let equivalent_domains = data.EquivalentDomains.unwrap_or(Vec::new());
let excluded_globals = data.ExcludedGlobalEquivalentDomains.unwrap_or_default();
let equivalent_domains = data.EquivalentDomains.unwrap_or_default();
let mut user = headers.user;
use serde_json::to_string;
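The unwrap_or_default change above is the usual lint fix: unwrap_or(Vec::new()) builds the fallback value eagerly even when the Option is Some, while unwrap_or_default() only invokes Default::default() on None. A one-line illustration (assumed standalone snippet):

fn main() {
    let missing: Option<Vec<String>> = None;
    // Same result, but the fallback is only constructed when actually needed.
    assert!(missing.unwrap_or_default().is_empty());
}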

View File

@@ -1,13 +1,14 @@
#![allow(unused_imports)]
use rocket::State;
use rocket_contrib::{Json, Value};
use CONFIG;
use db::DbConn;
use db::models::*;
use api::{PasswordData, JsonResult, EmptyResult, NumberOrString, JsonUpcase};
use api::{PasswordData, JsonResult, EmptyResult, NumberOrString, JsonUpcase, WebSocketUsers, UpdateType};
use auth::{Headers, AdminHeaders, OwnerHeaders};
use serde::{Deserialize, Deserializer};
#[derive(Deserialize)]
#[allow(non_snake_case)]
@@ -17,7 +18,7 @@ struct OrgData {
Key: String,
Name: String,
#[serde(rename = "PlanType")]
_PlanType: String, // Ignored, always use the same plan
_PlanType: NumberOrString, // Ignored, always use the same plan
}
#[derive(Deserialize, Debug)]
@@ -55,7 +56,7 @@ fn create_organization(headers: Headers, data: JsonUpcase<OrgData>, conn: DbConn
Ok(Json(org.to_json()))
}
#[post("/organizations/<org_id>/delete", data = "<data>")]
#[delete("/organizations/<org_id>", data = "<data>")]
fn delete_organization(org_id: String, data: JsonUpcase<PasswordData>, headers: OwnerHeaders, conn: DbConn) -> EmptyResult {
let data: PasswordData = data.into_inner().data;
let password_hash = data.MasterPasswordHash;
@@ -73,6 +74,34 @@ fn delete_organization(org_id: String, data: JsonUpcase<PasswordData>, headers:
}
}
#[post("/organizations/<org_id>/delete", data = "<data>")]
fn post_delete_organization(org_id: String, data: JsonUpcase<PasswordData>, headers: OwnerHeaders, conn: DbConn) -> EmptyResult {
delete_organization(org_id, data, headers, conn)
}
#[post("/organizations/<org_id>/leave")]
fn leave_organization(org_id: String, headers: Headers, conn: DbConn) -> EmptyResult {
match UserOrganization::find_by_user_and_org(&headers.user.uuid, &org_id, &conn) {
None => err!("User not part of organization"),
Some(user_org) => {
if user_org.type_ == UserOrgType::Owner as i32 {
let num_owners = UserOrganization::find_by_org_and_type(
&org_id, UserOrgType::Owner as i32, &conn)
.len();
if num_owners <= 1 {
err!("The last owner can't leave")
}
}
match user_org.delete(&conn) {
Ok(()) => Ok(()),
Err(_) => err!("Failed leaving the organization")
}
}
}
}
#[get("/organizations/<org_id>")]
fn get_organization(org_id: String, _headers: OwnerHeaders, conn: DbConn) -> JsonResult {
match Organization::find_by_uuid(&org_id, &conn) {
@@ -81,6 +110,11 @@ fn get_organization(org_id: String, _headers: OwnerHeaders, conn: DbConn) -> Jso
}
}
#[put("/organizations/<org_id>", data = "<data>")]
fn put_organization(org_id: String, headers: OwnerHeaders, data: JsonUpcase<OrganizationUpdateData>, conn: DbConn) -> JsonResult {
post_organization(org_id, headers, data, conn)
}
#[post("/organizations/<org_id>", data = "<data>")]
fn post_organization(org_id: String, _headers: OwnerHeaders, data: JsonUpcase<OrganizationUpdateData>, conn: DbConn) -> JsonResult {
let data: OrganizationUpdateData = data.into_inner().data;
@@ -105,9 +139,8 @@ fn get_user_collections(headers: Headers, conn: DbConn) -> JsonResult {
"Data":
Collection::find_by_user_uuid(&headers.user.uuid, &conn)
.iter()
.map(|collection| {
collection.to_json()
}).collect::<Value>(),
.map(Collection::to_json)
.collect::<Value>(),
"Object": "list"
})))
}
@@ -118,9 +151,8 @@ fn get_org_collections(org_id: String, _headers: AdminHeaders, conn: DbConn) ->
"Data":
Collection::find_by_organization(&org_id, &conn)
.iter()
.map(|collection| {
collection.to_json()
}).collect::<Value>(),
.map(Collection::to_json)
.collect::<Value>(),
"Object": "list"
})))
}
@@ -141,6 +173,11 @@ fn post_organization_collections(org_id: String, _headers: AdminHeaders, data: J
Ok(Json(collection.to_json()))
}
#[put("/organizations/<org_id>/collections/<col_id>", data = "<data>")]
fn put_organization_collection_update(org_id: String, col_id: String, headers: AdminHeaders, data: JsonUpcase<NewCollectionData>, conn: DbConn) -> JsonResult {
post_organization_collection_update(org_id, col_id, headers, data, conn)
}
#[post("/organizations/<org_id>/collections/<col_id>", data = "<data>")]
fn post_organization_collection_update(org_id: String, col_id: String, _headers: AdminHeaders, data: JsonUpcase<NewCollectionData>, conn: DbConn) -> JsonResult {
let data: NewCollectionData = data.into_inner().data;
@@ -165,8 +202,9 @@ fn post_organization_collection_update(org_id: String, col_id: String, _headers:
Ok(Json(collection.to_json()))
}
#[post("/organizations/<org_id>/collections/<col_id>/delete-user/<org_user_id>")]
fn post_organization_collection_delete_user(org_id: String, col_id: String, org_user_id: String, _headers: AdminHeaders, conn: DbConn) -> EmptyResult {
#[delete("/organizations/<org_id>/collections/<col_id>/user/<org_user_id>")]
fn delete_organization_collection_user(org_id: String, col_id: String, org_user_id: String, _headers: AdminHeaders, conn: DbConn) -> EmptyResult {
let collection = match Collection::find_by_uuid(&col_id, &conn) {
None => err!("Collection not found"),
Some(collection) => if collection.org_uuid == org_id {
@@ -176,7 +214,7 @@ fn post_organization_collection_delete_user(org_id: String, col_id: String, org_
}
};
match UserOrganization::find_by_uuid(&org_user_id, &conn) {
match UserOrganization::find_by_uuid_and_org(&org_user_id, &org_id, &conn) {
None => err!("User not found in organization"),
Some(user_org) => {
match CollectionUser::find_by_collection_and_user(&collection.uuid, &user_org.user_uuid, &conn) {
@@ -192,17 +230,13 @@ fn post_organization_collection_delete_user(org_id: String, col_id: String, org_
}
}
#[derive(Deserialize, Debug)]
#[allow(non_snake_case)]
struct DeleteCollectionData {
Id: String,
OrgId: String,
#[post("/organizations/<org_id>/collections/<col_id>/delete-user/<org_user_id>")]
fn post_organization_collection_delete_user(org_id: String, col_id: String, org_user_id: String, headers: AdminHeaders, conn: DbConn) -> EmptyResult {
delete_organization_collection_user(org_id, col_id, org_user_id, headers, conn)
}
#[post("/organizations/<org_id>/collections/<col_id>/delete", data = "<data>")]
fn post_organization_collection_delete(org_id: String, col_id: String, _headers: AdminHeaders, data: JsonUpcase<DeleteCollectionData>, conn: DbConn) -> EmptyResult {
let _data: DeleteCollectionData = data.into_inner().data;
#[delete("/organizations/<org_id>/collections/<col_id>")]
fn delete_organization_collection(org_id: String, col_id: String, _headers: AdminHeaders, conn: DbConn) -> EmptyResult {
match Collection::find_by_uuid(&col_id, &conn) {
None => err!("Collection not found"),
Some(collection) => if collection.org_uuid == org_id {
@@ -216,6 +250,18 @@ fn post_organization_collection_delete(org_id: String, col_id: String, _headers:
}
}
#[derive(Deserialize, Debug)]
#[allow(non_snake_case)]
struct DeleteCollectionData {
Id: String,
OrgId: String,
}
#[post("/organizations/<org_id>/collections/<col_id>/delete", data = "<_data>")]
fn post_organization_collection_delete(org_id: String, col_id: String, headers: AdminHeaders, _data: JsonUpcase<DeleteCollectionData>, conn: DbConn) -> EmptyResult {
delete_organization_collection(org_id, col_id, headers, conn)
}
#[get("/organizations/<org_id>/collections/<coll_id>/details")]
fn get_org_collection_detail(org_id: String, coll_id: String, headers: AdminHeaders, conn: DbConn) -> JsonResult {
match Collection::find_by_uuid_and_user(&coll_id, &headers.user.uuid, &conn) {
@@ -243,7 +289,7 @@ fn get_collection_users(org_id: String, coll_id: String, _headers: AdminHeaders,
.iter().map(|col_user| {
UserOrganization::find_by_user_and_org(&col_user.user_uuid, &org_id, &conn)
.unwrap()
.to_json_collection_user_details(&col_user.read_only, &conn)
.to_json_collection_user_details(col_user.read_only, &conn)
}).collect();
Ok(Json(json!({
@@ -285,11 +331,19 @@ fn get_org_users(org_id: String, headers: AdminHeaders, conn: DbConn) -> JsonRes
})))
}
fn deserialize_collections<'de, D>(deserializer: D) -> Result<Vec<CollectionData>, D::Error>
where
D: Deserializer<'de>,
{
// Deserialize null to empty Vec
Deserialize::deserialize(deserializer).or(Ok(vec![]))
}
#[derive(Deserialize)]
#[allow(non_snake_case)]
struct CollectionData {
id: String,
readOnly: bool,
Id: String,
ReadOnly: bool,
}
#[derive(Deserialize)]
@@ -297,6 +351,7 @@ struct CollectionData {
struct InviteData {
Emails: Vec<String>,
Type: NumberOrString,
#[serde(deserialize_with = "deserialize_collections")]
Collections: Vec<CollectionData>,
AccessAll: Option<bool>,
}
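A quick standalone check (assumed, not part of the diff) that deserialize_collections above maps a JSON null to an empty list; note that .or(Ok(vec![])) actually swallows any deserialization failure, not only null:

#[macro_use] extern crate serde_derive;
extern crate serde;
extern crate serde_json;

use serde::{Deserialize, Deserializer};

#[derive(Deserialize)]
#[allow(non_snake_case)]
struct CollectionData {
    Id: String,
    ReadOnly: bool,
}

fn deserialize_collections<'de, D>(deserializer: D) -> Result<Vec<CollectionData>, D::Error>
where
    D: Deserializer<'de>,
{
    // Deserialize null (or anything malformed) to an empty Vec
    Deserialize::deserialize(deserializer).or(Ok(vec![]))
}

#[derive(Deserialize)]
#[allow(non_snake_case)]
struct InviteLike {
    #[serde(deserialize_with = "deserialize_collections")]
    Collections: Vec<CollectionData>,
}

fn main() {
    let parsed: InviteLike = serde_json::from_str(r#"{ "Collections": null }"#).unwrap();
    assert!(parsed.Collections.is_empty());
}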
@@ -305,7 +360,7 @@ struct InviteData {
fn send_invite(org_id: String, data: JsonUpcase<InviteData>, headers: AdminHeaders, conn: DbConn) -> EmptyResult {
let data: InviteData = data.into_inner().data;
let new_type = match UserOrgType::from_str(&data.Type.to_string()) {
let new_type = match UserOrgType::from_str(&data.Type.into_string()) {
Some(new_type) => new_type as i32,
None => err!("Invalid type")
};
@@ -315,56 +370,70 @@ fn send_invite(org_id: String, data: JsonUpcase<InviteData>, headers: AdminHeade
err!("Only Owners can invite Admins or Owners")
}
for user_opt in data.Emails.iter().map(|email| User::find_by_mail(email, &conn)) {
match user_opt {
None => err!("User email does not exist"),
Some(user) => {
match UserOrganization::find_by_user_and_org(&user.uuid, &org_id, &conn) {
Some(_) => err!("User already in organization"),
None => ()
for email in data.Emails.iter() {
let mut user_org_status = UserOrgStatus::Accepted as i32;
let user = match User::find_by_mail(&email, &conn) {
None => if CONFIG.invitations_allowed { // Invite user if that's enabled
let mut invitation = Invitation::new(email.clone());
match invitation.save(&conn) {
Ok(()) => {
let mut user = User::new(email.clone());
if user.save(&conn) {
user_org_status = UserOrgStatus::Invited as i32;
user
} else {
err!("Failed to create placeholder for invited user")
}
}
Err(_) => err!(format!("Failed to invite: {}", email))
}
} else {
err!(format!("User email does not exist: {}", email))
},
Some(user) => if UserOrganization::find_by_user_and_org(&user.uuid, &org_id, &conn).is_some() {
err!(format!("User already in organization: {}", email))
} else {
user
}
let mut new_user = UserOrganization::new(user.uuid.clone(), org_id.clone());
let access_all = data.AccessAll.unwrap_or(false);
new_user.access_all = access_all;
new_user.type_ = new_type;
};
// If no accessAll, add the collections received
if !access_all {
for col in data.Collections.iter() {
match Collection::find_by_uuid_and_org(&col.id, &org_id, &conn) {
None => err!("Collection not found in Organization"),
Some(collection) => {
match CollectionUser::save(&user.uuid, &collection.uuid, col.readOnly, &conn) {
Ok(()) => (),
Err(_) => err!("Failed saving collection access for user")
}
}
let mut new_user = UserOrganization::new(user.uuid.clone(), org_id.clone());
let access_all = data.AccessAll.unwrap_or(false);
new_user.access_all = access_all;
new_user.type_ = new_type;
new_user.status = user_org_status;
// If no accessAll, add the collections received
if !access_all {
for col in &data.Collections {
match Collection::find_by_uuid_and_org(&col.Id, &org_id, &conn) {
None => err!("Collection not found in Organization"),
Some(collection) => {
if CollectionUser::save(&user.uuid, &collection.uuid, col.ReadOnly, &conn).is_err() {
err!("Failed saving collection access for user")
}
}
}
new_user.save(&conn);
}
}
new_user.save(&conn);
}
Ok(())
}
#[post("/organizations/<org_id>/users/<user_id>/confirm", data = "<data>")]
fn confirm_invite(org_id: String, user_id: String, data: JsonUpcase<Value>, headers: AdminHeaders, conn: DbConn) -> EmptyResult {
#[post("/organizations/<org_id>/users/<org_user_id>/confirm", data = "<data>")]
fn confirm_invite(org_id: String, org_user_id: String, data: JsonUpcase<Value>, headers: AdminHeaders, conn: DbConn) -> EmptyResult {
let data = data.into_inner().data;
let mut user_to_confirm = match UserOrganization::find_by_uuid(&user_id, &conn) {
let mut user_to_confirm = match UserOrganization::find_by_uuid_and_org(&org_user_id, &org_id, &conn) {
Some(user) => user,
None => err!("Failed to find user membership")
None => err!("The specified user isn't a member of the organization")
};
if user_to_confirm.org_uuid != org_id {
err!("The specified user isn't a member of the organization")
}
if user_to_confirm.type_ != UserOrgType::User as i32 &&
headers.org_user_type != UserOrgType::Owner as i32 {
err!("Only Owners can confirm Admins or Owners")
@@ -375,7 +444,7 @@ fn confirm_invite(org_id: String, user_id: String, data: JsonUpcase<Value>, head
}
user_to_confirm.status = UserOrgStatus::Confirmed as i32;
user_to_confirm.key = match data["key"].as_str() {
user_to_confirm.key = match data["Key"].as_str() {
Some(key) => key.to_string(),
None => err!("Invalid key provided")
};
@@ -385,17 +454,13 @@ fn confirm_invite(org_id: String, user_id: String, data: JsonUpcase<Value>, head
Ok(())
}
#[get("/organizations/<org_id>/users/<user_id>")]
fn get_user(org_id: String, user_id: String, _headers: AdminHeaders, conn: DbConn) -> JsonResult {
let user = match UserOrganization::find_by_uuid(&user_id, &conn) {
#[get("/organizations/<org_id>/users/<org_user_id>")]
fn get_user(org_id: String, org_user_id: String, _headers: AdminHeaders, conn: DbConn) -> JsonResult {
let user = match UserOrganization::find_by_uuid_and_org(&org_user_id, &org_id, &conn) {
Some(user) => user,
None => err!("Failed to find user membership")
None => err!("The specified user isn't a member of the organization")
};
if user.org_uuid != org_id {
err!("The specified user isn't a member of the organization")
}
Ok(Json(user.to_json_details(&conn)))
}
@@ -403,32 +468,41 @@ fn get_user(org_id: String, user_id: String, _headers: AdminHeaders, conn: DbCon
#[allow(non_snake_case)]
struct EditUserData {
Type: NumberOrString,
#[serde(deserialize_with = "deserialize_collections")]
Collections: Vec<CollectionData>,
AccessAll: bool,
}
#[post("/organizations/<org_id>/users/<user_id>", data = "<data>", rank = 1)]
fn edit_user(org_id: String, user_id: String, data: JsonUpcase<EditUserData>, headers: AdminHeaders, conn: DbConn) -> EmptyResult {
#[put("/organizations/<org_id>/users/<org_user_id>", data = "<data>", rank = 1)]
fn put_organization_user(org_id: String, org_user_id: String, data: JsonUpcase<EditUserData>, headers: AdminHeaders, conn: DbConn) -> EmptyResult {
edit_user(org_id, org_user_id, data, headers, conn)
}
#[post("/organizations/<org_id>/users/<org_user_id>", data = "<data>", rank = 1)]
fn edit_user(org_id: String, org_user_id: String, data: JsonUpcase<EditUserData>, headers: AdminHeaders, conn: DbConn) -> EmptyResult {
let data: EditUserData = data.into_inner().data;
let new_type = match UserOrgType::from_str(&data.Type.to_string()) {
let new_type = match UserOrgType::from_str(&data.Type.into_string()) {
Some(new_type) => new_type as i32,
None => err!("Invalid type")
};
let mut user_to_edit = match UserOrganization::find_by_uuid(&user_id, &conn) {
let mut user_to_edit = match UserOrganization::find_by_uuid_and_org(&org_user_id, &org_id, &conn) {
Some(user) => user,
None => err!("The specified user isn't member of the organization")
};
if new_type != UserOrgType::User as i32 &&
if new_type != user_to_edit.type_ as i32 && (
user_to_edit.type_ <= UserOrgType::Admin as i32 ||
new_type <= UserOrgType::Admin as i32
) &&
headers.org_user_type != UserOrgType::Owner as i32 {
err!("Only Owners can grant Admin or Owner type")
err!("Only Owners can grant and remove Admin or Owner privileges")
}
if user_to_edit.type_ != UserOrgType::User as i32 &&
if user_to_edit.type_ == UserOrgType::Owner as i32 &&
headers.org_user_type != UserOrgType::Owner as i32 {
err!("Only Owners can edit Admin or Owner")
err!("Only Owners can edit Owner users")
}
if user_to_edit.type_ == UserOrgType::Owner as i32 &&
@@ -449,21 +523,19 @@ fn edit_user(org_id: String, user_id: String, data: JsonUpcase<EditUserData>, he
// Delete all the odd collections
for c in CollectionUser::find_by_organization_and_user_uuid(&org_id, &user_to_edit.user_uuid, &conn) {
match c.delete(&conn) {
Ok(()) => (),
Err(_) => err!("Failed deleting old collection assignment")
if c.delete(&conn).is_err() {
err!("Failed deleting old collection assignment")
}
}
// If no accessAll, add the collections received
if !data.AccessAll {
for col in data.Collections.iter() {
match Collection::find_by_uuid_and_org(&col.id, &org_id, &conn) {
for col in &data.Collections {
match Collection::find_by_uuid_and_org(&col.Id, &org_id, &conn) {
None => err!("Collection not found in Organization"),
Some(collection) => {
match CollectionUser::save(&user_to_edit.user_uuid, &collection.uuid, col.readOnly, &conn) {
Ok(()) => (),
Err(_) => err!("Failed saving collection access for user")
if CollectionUser::save(&user_to_edit.user_uuid, &collection.uuid, col.ReadOnly, &conn).is_err() {
err!("Failed saving collection access for user")
}
}
}
@@ -475,9 +547,9 @@ fn edit_user(org_id: String, user_id: String, data: JsonUpcase<EditUserData>, he
Ok(())
}
#[post("/organizations/<org_id>/users/<user_id>/delete")]
fn delete_user(org_id: String, user_id: String, headers: AdminHeaders, conn: DbConn) -> EmptyResult {
let user_to_delete = match UserOrganization::find_by_uuid(&user_id, &conn) {
#[delete("/organizations/<org_id>/users/<org_user_id>")]
fn delete_user(org_id: String, org_user_id: String, headers: AdminHeaders, conn: DbConn) -> EmptyResult {
let user_to_delete = match UserOrganization::find_by_uuid_and_org(&org_user_id, &org_id, &conn) {
Some(user) => user,
None => err!("User to delete isn't member of the organization")
};
@@ -502,4 +574,78 @@ fn delete_user(org_id: String, user_id: String, headers: AdminHeaders, conn: DbC
Ok(()) => Ok(()),
Err(_) => err!("Failed deleting user from organization")
}
}
#[post("/organizations/<org_id>/users/<org_user_id>/delete")]
fn post_delete_user(org_id: String, org_user_id: String, headers: AdminHeaders, conn: DbConn) -> EmptyResult {
delete_user(org_id, org_user_id, headers, conn)
}
use super::ciphers::CipherData;
use super::ciphers::update_cipher_from_data;
#[derive(Deserialize)]
#[allow(non_snake_case)]
struct ImportData {
Ciphers: Vec<CipherData>,
Collections: Vec<NewCollectionData>,
CollectionRelationships: Vec<RelationsData>,
}
#[derive(Deserialize)]
#[allow(non_snake_case)]
struct RelationsData {
// Cipher index
Key: usize,
// Collection index
Value: usize,
}
#[post("/ciphers/import-organization?<query>", data = "<data>")]
fn post_org_import(query: OrgIdData, data: JsonUpcase<ImportData>, headers: Headers, conn: DbConn, ws: State<WebSocketUsers>) -> EmptyResult {
let data: ImportData = data.into_inner().data;
let org_id = query.organizationId;
let org_user = match UserOrganization::find_by_user_and_org(&headers.user.uuid, &org_id, &conn) {
Some(user) => user,
None => err!("User is not part of the organization")
};
if org_user.type_ > UserOrgType::Admin as i32 {
err!("Only admins or owners can import into an organization")
}
// Read and create the collections
let collections: Vec<_> = data.Collections.into_iter().map(|coll| {
let mut collection = Collection::new(org_id.clone(), coll.Name);
collection.save(&conn);
collection
}).collect();
// Read the relations between collections and ciphers
let mut relations = Vec::new();
for relation in data.CollectionRelationships {
relations.push((relation.Key, relation.Value));
}
// Read and create the ciphers
let ciphers: Vec<_> = data.Ciphers.into_iter().map(|cipher_data| {
let mut cipher = Cipher::new(cipher_data.Type, cipher_data.Name.clone());
update_cipher_from_data(&mut cipher, cipher_data, &headers, false, &conn, &ws, UpdateType::SyncCipherCreate).ok();
cipher
}).collect();
// Assign the collections
for (cipher_index, coll_index) in relations {
let cipher_id = &ciphers[cipher_index].uuid;
let coll_id = &collections[coll_index].uuid;
CollectionCipher::save(cipher_id, coll_id, &conn);
}
let mut user = headers.user;
match user.update_revision(&conn) {
Ok(()) => Ok(()),
Err(_) => err!("Failed to update the revision, please log out and log back in to finish import.")
}
}
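For reference, a minimal (assumed) request body for POST /ciphers/import-organization?organizationId=<org uuid>, matching the ImportData struct above. Each CollectionRelationships entry means "assign ciphers[Key] to collections[Value]", which is exactly what the assignment loop at the end of the function does; the cipher shape comes from CipherData in ciphers.rs, outside this hunk, so it is left empty here:

{
  "Ciphers": [],
  "Collections": [ { "Name": "Imported collection" } ],
  "CollectionRelationships": []
}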

View File

@@ -1,28 +1,24 @@
use rocket_contrib::{Json, Value};
use data_encoding::BASE32;
use rocket_contrib::{Json, Value};
use serde_json;
use db::DbConn;
use db::{
models::{TwoFactor, TwoFactorType, User},
DbConn,
};
use crypto;
use api::{PasswordData, JsonResult, NumberOrString, JsonUpcase};
use api::{ApiResult, JsonResult, JsonUpcase, NumberOrString, PasswordData};
use auth::Headers;
#[get("/two-factor")]
fn get_twofactor(headers: Headers) -> JsonResult {
let data = if headers.user.totp_secret.is_none() {
Value::Null
} else {
json!([{
"Enabled": true,
"Type": 0,
"Object": "twoFactorProvider"
}])
};
fn get_twofactor(headers: Headers, conn: DbConn) -> JsonResult {
let twofactors = TwoFactor::find_by_user(&headers.user.uuid, &conn);
let twofactors_json: Vec<Value> = twofactors.iter().map(|c| c.to_json_list()).collect();
Ok(Json(json!({
"Data": data,
"Data": twofactors_json,
"Object": "list"
})))
}
@@ -58,7 +54,7 @@ fn recover(data: JsonUpcase<RecoverTwoFactor>, conn: DbConn) -> JsonResult {
// Get the user
let mut user = match User::find_by_mail(&data.Email, &conn) {
Some(user) => user,
None => err!("Username or password is incorrect. Try again.")
None => err!("Username or password is incorrect. Try again."),
};
// Check password
@@ -71,24 +67,78 @@ fn recover(data: JsonUpcase<RecoverTwoFactor>, conn: DbConn) -> JsonResult {
err!("Recovery code is incorrect. Try again.")
}
user.totp_secret = None;
// Remove all twofactors from the user
for twofactor in TwoFactor::find_by_user(&user.uuid, &conn) {
twofactor.delete(&conn).expect("Error deleting twofactor");
}
// Remove the recovery code, not needed without twofactors
user.totp_recover = None;
user.save(&conn);
Ok(Json(json!({})))
}
#[derive(Deserialize)]
#[allow(non_snake_case)]
struct DisableTwoFactorData {
MasterPasswordHash: String,
Type: NumberOrString,
}
#[post("/two-factor/disable", data = "<data>")]
fn disable_twofactor(
data: JsonUpcase<DisableTwoFactorData>,
headers: Headers,
conn: DbConn,
) -> JsonResult {
let data: DisableTwoFactorData = data.into_inner().data;
let password_hash = data.MasterPasswordHash;
if !headers.user.check_valid_password(&password_hash) {
err!("Invalid password");
}
let type_ = data.Type.into_i32().expect("Invalid type");
if let Some(twofactor) = TwoFactor::find_by_user_and_type(&headers.user.uuid, type_, &conn) {
twofactor.delete(&conn).expect("Error deleting twofactor");
}
Ok(Json(json!({
"Enabled": false,
"Type": type_,
"Object": "twoFactorProvider"
})))
}
#[put("/two-factor/disable", data = "<data>")]
fn disable_twofactor_put(
data: JsonUpcase<DisableTwoFactorData>,
headers: Headers,
conn: DbConn,
) -> JsonResult {
disable_twofactor(data, headers, conn)
}
#[post("/two-factor/get-authenticator", data = "<data>")]
fn generate_authenticator(data: JsonUpcase<PasswordData>, headers: Headers) -> JsonResult {
fn generate_authenticator(
data: JsonUpcase<PasswordData>,
headers: Headers,
conn: DbConn,
) -> JsonResult {
let data: PasswordData = data.into_inner().data;
if !headers.user.check_valid_password(&data.MasterPasswordHash) {
err!("Invalid password");
}
let (enabled, key) = match headers.user.totp_secret {
Some(secret) => (true, secret),
_ => (false, BASE32.encode(&crypto::get_random(vec![0u8; 20])))
let type_ = TwoFactorType::Authenticator as i32;
let twofactor = TwoFactor::find_by_user_and_type(&headers.user.uuid, type_, &conn);
let (enabled, key) = match twofactor {
Some(tf) => (true, tf.data),
_ => (false, BASE32.encode(&crypto::get_random(vec![0u8; 20]))),
};
Ok(Json(json!({
@@ -100,20 +150,24 @@ fn generate_authenticator(data: JsonUpcase<PasswordData>, headers: Headers) -> J
#[derive(Deserialize, Debug)]
#[allow(non_snake_case)]
struct EnableTwoFactorData {
struct EnableAuthenticatorData {
MasterPasswordHash: String,
Key: String,
Token: NumberOrString,
}
#[post("/two-factor/authenticator", data = "<data>")]
fn activate_authenticator(data: JsonUpcase<EnableTwoFactorData>, headers: Headers, conn: DbConn) -> JsonResult {
let data: EnableTwoFactorData = data.into_inner().data;
fn activate_authenticator(
data: JsonUpcase<EnableAuthenticatorData>,
headers: Headers,
conn: DbConn,
) -> JsonResult {
let data: EnableAuthenticatorData = data.into_inner().data;
let password_hash = data.MasterPasswordHash;
let key = data.Key;
let token = match data.Token.to_i32() {
let token = match data.Token.into_i32() {
Some(n) => n as u64,
None => err!("Malformed token")
None => err!("Malformed token"),
};
if !headers.user.check_valid_password(&password_hash) {
@@ -123,27 +177,24 @@ fn activate_authenticator(data: JsonUpcase<EnableTwoFactorData>, headers: Header
// Validate key as base32 and 20 bytes length
let decoded_key: Vec<u8> = match BASE32.decode(key.as_bytes()) {
Ok(decoded) => decoded,
_ => err!("Invalid totp secret")
_ => err!("Invalid totp secret"),
};
if decoded_key.len() != 20 {
err!("Invalid key length")
}
// Set key in user.totp_secret
let mut user = headers.user;
user.totp_secret = Some(key.to_uppercase());
let type_ = TwoFactorType::Authenticator;
let twofactor = TwoFactor::new(headers.user.uuid.clone(), type_, key.to_uppercase());
// Validate the token provided with the key
if !user.check_totp_code(token) {
if !twofactor.check_totp_code(token) {
err!("Invalid totp code")
}
// Generate totp_recover
let totp_recover = BASE32.encode(&crypto::get_random(vec![0u8; 20]));
user.totp_recover = Some(totp_recover);
user.save(&conn);
let mut user = headers.user;
_generate_recover_code(&mut user, &conn);
twofactor.save(&conn).expect("Error saving twofactor");
Ok(Json(json!({
"Enabled": true,
@@ -152,32 +203,280 @@ fn activate_authenticator(data: JsonUpcase<EnableTwoFactorData>, headers: Header
})))
}
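The check_totp_code method called above lives on the TwoFactor model and isn't part of this compare view. A minimal sketch of what such a check could look like, assuming the oath crate (a guess at the implementation, not a quote of it):

extern crate data_encoding;
extern crate oath;

use data_encoding::BASE32;
use oath::{totp_raw_now, HashType};

// Hypothetical helper: compare the submitted code against the current
// 30-second, 6-digit TOTP window derived from the stored base32 secret.
fn check_totp_code(secret_b32: &str, code: u64) -> bool {
    let key = match BASE32.decode(secret_b32.as_bytes()) {
        Ok(k) => k,
        Err(_) => return false,
    };
    totp_raw_now(&key, 6, 0, 30, &HashType::SHA1) == code
}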
#[derive(Deserialize)]
#[allow(non_snake_case)]
struct DisableTwoFactorData {
MasterPasswordHash: String,
Type: NumberOrString,
#[put("/two-factor/authenticator", data = "<data>")]
fn activate_authenticator_put(
data: JsonUpcase<EnableAuthenticatorData>,
headers: Headers,
conn: DbConn,
) -> JsonResult {
activate_authenticator(data, headers, conn)
}
#[post("/two-factor/disable", data = "<data>")]
fn disable_authenticator(data: JsonUpcase<DisableTwoFactorData>, headers: Headers, conn: DbConn) -> JsonResult {
let data: DisableTwoFactorData = data.into_inner().data;
let password_hash = data.MasterPasswordHash;
let _type = data.Type;
fn _generate_recover_code(user: &mut User, conn: &DbConn) {
if user.totp_recover.is_none() {
let totp_recover = BASE32.encode(&crypto::get_random(vec![0u8; 20]));
user.totp_recover = Some(totp_recover);
user.save(conn);
}
}
if !headers.user.check_valid_password(&password_hash) {
use u2f::messages::{RegisterResponse, SignResponse, U2fSignRequest};
use u2f::protocol::{Challenge, U2f};
use u2f::register::Registration;
use CONFIG;
const U2F_VERSION: &str = "U2F_V2";
lazy_static! {
static ref APP_ID: String = format!("{}/app-id.json", &CONFIG.domain);
static ref U2F: U2f = U2f::new(APP_ID.clone());
}
#[post("/two-factor/get-u2f", data = "<data>")]
fn generate_u2f(data: JsonUpcase<PasswordData>, headers: Headers, conn: DbConn) -> JsonResult {
if !CONFIG.domain_set {
err!("`DOMAIN` environment variable is not set. U2F disabled")
}
let data: PasswordData = data.into_inner().data;
if !headers.user.check_valid_password(&data.MasterPasswordHash) {
err!("Invalid password");
}
let mut user = headers.user;
user.totp_secret = None;
user.totp_recover = None;
let user_uuid = &headers.user.uuid;
user.save(&conn);
let u2f_type = TwoFactorType::U2f as i32;
let register_type = TwoFactorType::U2fRegisterChallenge;
let (enabled, challenge) = match TwoFactor::find_by_user_and_type(user_uuid, u2f_type, &conn) {
Some(_) => (true, String::new()),
None => {
let c = _create_u2f_challenge(user_uuid, register_type, &conn);
(false, c.challenge)
}
};
Ok(Json(json!({
"Enabled": false,
"Type": 0,
"Object": "twoFactorProvider"
"Enabled": enabled,
"Challenge": {
"UserId": headers.user.uuid,
"AppId": APP_ID.to_string(),
"Challenge": challenge,
"Version": U2F_VERSION,
},
"Object": "twoFactorU2f"
})))
}
#[derive(Deserialize, Debug)]
#[allow(non_snake_case)]
struct EnableU2FData {
MasterPasswordHash: String,
DeviceResponse: String,
}
// This struct is copied from the U2F lib
// because challenge is not always sent
#[derive(Deserialize)]
#[serde(rename_all = "camelCase")]
struct RegisterResponseCopy {
pub registration_data: String,
pub version: String,
pub challenge: Option<String>,
pub error_code: Option<NumberOrString>,
pub client_data: String,
}
impl RegisterResponseCopy {
fn into_response(self, challenge: String) -> RegisterResponse {
RegisterResponse {
registration_data: self.registration_data,
version: self.version,
challenge,
client_data: self.client_data,
}
}
}
#[post("/two-factor/u2f", data = "<data>")]
fn activate_u2f(data: JsonUpcase<EnableU2FData>, headers: Headers, conn: DbConn) -> JsonResult {
let data: EnableU2FData = data.into_inner().data;
if !headers.user.check_valid_password(&data.MasterPasswordHash) {
err!("Invalid password");
}
let tf_challenge = TwoFactor::find_by_user_and_type(
&headers.user.uuid,
TwoFactorType::U2fRegisterChallenge as i32,
&conn,
);
if let Some(tf_challenge) = tf_challenge {
let challenge: Challenge = serde_json::from_str(&tf_challenge.data)
.expect("Can't parse U2fRegisterChallenge data");
tf_challenge
.delete(&conn)
.expect("Error deleting U2F register challenge");
let response_copy: RegisterResponseCopy =
serde_json::from_str(&data.DeviceResponse).expect("Can't parse RegisterResponse data");
let error_code = response_copy
.error_code
.clone()
.map_or("0".into(), NumberOrString::into_string);
if error_code != "0" {
err!("Error registering U2F token")
}
let response = response_copy.into_response(challenge.challenge.clone());
match U2F.register_response(challenge.clone(), response) {
Ok(registration) => {
// TODO: Allow more than one U2F device
let mut registrations = Vec::new();
registrations.push(registration);
let tf_registration = TwoFactor::new(
headers.user.uuid.clone(),
TwoFactorType::U2f,
serde_json::to_string(&registrations).unwrap(),
);
tf_registration
.save(&conn)
.expect("Error saving U2F registration");
let mut user = headers.user;
_generate_recover_code(&mut user, &conn);
Ok(Json(json!({
"Enabled": true,
"Challenge": {
"UserId": user.uuid,
"AppId": APP_ID.to_string(),
"Challenge": challenge,
"Version": U2F_VERSION,
},
"Object": "twoFactorU2f"
})))
}
Err(e) => {
println!("Error: {:#?}", e);
err!("Error activating u2f")
}
}
} else {
err!("Can't recover challenge")
}
}
#[put("/two-factor/u2f", data = "<data>")]
fn activate_u2f_put(data: JsonUpcase<EnableU2FData>, headers: Headers, conn: DbConn) -> JsonResult {
activate_u2f(data,headers, conn)
}
fn _create_u2f_challenge(user_uuid: &str, type_: TwoFactorType, conn: &DbConn) -> Challenge {
let challenge = U2F.generate_challenge().unwrap();
TwoFactor::new(
user_uuid.into(),
type_,
serde_json::to_string(&challenge).unwrap(),
).save(conn)
.expect("Error saving challenge");
challenge
}
// This struct is copied from the U2F lib
// because it doesn't implement Deserialize
#[derive(Serialize, Deserialize, Clone)]
#[serde(rename_all = "camelCase")]
struct RegistrationCopy {
pub key_handle: Vec<u8>,
pub pub_key: Vec<u8>,
pub attestation_cert: Option<Vec<u8>>,
}
impl Into<Registration> for RegistrationCopy {
fn into(self) -> Registration {
Registration {
key_handle: self.key_handle,
pub_key: self.pub_key,
attestation_cert: self.attestation_cert,
}
}
}
fn _parse_registrations(registations: &str) -> Vec<Registration> {
let registrations_copy: Vec<RegistrationCopy> =
serde_json::from_str(registations).expect("Can't parse RegistrationCopy data");
registrations_copy.into_iter().map(Into::into).collect()
}
pub fn generate_u2f_login(user_uuid: &str, conn: &DbConn) -> ApiResult<U2fSignRequest> {
let challenge = _create_u2f_challenge(user_uuid, TwoFactorType::U2fLoginChallenge, conn);
let type_ = TwoFactorType::U2f as i32;
let twofactor = match TwoFactor::find_by_user_and_type(user_uuid, type_, conn) {
Some(tf) => tf,
None => err!("No U2F devices registered"),
};
let registrations = _parse_registrations(&twofactor.data);
let signed_request: U2fSignRequest = U2F.sign_request(challenge, registrations);
Ok(signed_request)
}
pub fn validate_u2f_login(user_uuid: &str, response: &str, conn: &DbConn) -> ApiResult<()> {
let challenge_type = TwoFactorType::U2fLoginChallenge as i32;
let u2f_type = TwoFactorType::U2f as i32;
let tf_challenge = TwoFactor::find_by_user_and_type(user_uuid, challenge_type, &conn);
let challenge = match tf_challenge {
Some(tf_challenge) => {
let challenge: Challenge = serde_json::from_str(&tf_challenge.data)
.expect("Can't parse U2fLoginChallenge data");
tf_challenge
.delete(&conn)
.expect("Error deleting U2F login challenge");
challenge
}
None => err!("Can't recover login challenge"),
};
let twofactor = match TwoFactor::find_by_user_and_type(user_uuid, u2f_type, conn) {
Some(tf) => tf,
None => err!("No U2F devices registered"),
};
let registrations = _parse_registrations(&twofactor.data);
let response: SignResponse =
serde_json::from_str(response).expect("Can't parse SignResponse data");
let mut _counter: u32 = 0;
for registration in registrations {
let response =
U2F.sign_response(challenge.clone(), registration, response.clone(), _counter);
match response {
Ok(new_counter) => {
_counter = new_counter;
println!("O {:#}", new_counter);
return Ok(());
}
Err(e) => {
println!("E {:#}", e);
break;
}
}
}
err!("error verifying response")
}

View File

@@ -1,4 +1,3 @@
use std::io;
use std::io::prelude::*;
use std::fs::{create_dir_all, File};
@@ -19,28 +18,63 @@ fn icon(domain: String) -> Content<Vec<u8>> {
let icon_type = ContentType::new("image", "x-icon");
// Validate the domain to avoid directory traversal attacks
if domain.contains("/") || domain.contains("..") {
if domain.contains('/') || domain.contains("..") {
return Content(icon_type, get_fallback_icon());
}
let url = format!("https://icons.bitwarden.com/{}/icon.png", domain);
// Get the icon, or fallback in case of error
let icon = match get_icon_cached(&domain, &url) {
Ok(icon) => icon,
Err(_) => return Content(icon_type, get_fallback_icon())
};
let icon = get_icon(&domain);
Content(icon_type, icon)
}
fn get_icon(url: &str) -> Result<Vec<u8>, reqwest::Error> {
fn get_icon (domain: &str) -> Vec<u8> {
let path = format!("{}/{}.png", CONFIG.icon_cache_folder, domain);
if let Some(icon) = get_cached_icon(&path) {
return icon;
}
let url = get_icon_url(&domain);
// Get the icon, or fallback in case of error
match download_icon(&url) {
Ok(icon) => {
save_icon(&path, &icon);
icon
},
Err(e) => {
println!("Error downloading icon: {:?}", e);
get_fallback_icon()
}
}
}
fn get_cached_icon(path: &str) -> Option<Vec<u8>> {
// Try to read the cached icon, and return it if it exists
if let Ok(mut f) = File::open(path) {
let mut buffer = Vec::new();
if f.read_to_end(&mut buffer).is_ok() {
return Some(buffer);
}
}
None
}
fn get_icon_url(domain: &str) -> String {
if CONFIG.local_icon_extractor {
format!("http://{}/favicon.ico", domain)
} else {
format!("https://icons.bitwarden.com/{}/icon.png", domain)
}
}
fn download_icon(url: &str) -> Result<Vec<u8>, reqwest::Error> {
println!("Downloading icon for {}...", url);
let mut res = reqwest::get(url)?;
res = match res.error_for_status() {
Err(e) => return Err(e),
Ok(res) => res
};
res = res.error_for_status()?;
let mut buffer: Vec<u8> = vec![];
res.copy_to(&mut buffer)?;
@@ -48,39 +82,31 @@ fn get_icon(url: &str) -> Result<Vec<u8>, reqwest::Error> {
Ok(buffer)
}
fn get_icon_cached(key: &str, url: &str) -> io::Result<Vec<u8>> {
create_dir_all(&CONFIG.icon_cache_folder)?;
let path = &format!("{}/{}.png", CONFIG.icon_cache_folder, key);
fn save_icon(path: &str, icon: &[u8]) {
create_dir_all(&CONFIG.icon_cache_folder).expect("Error creating icon cache");
// Try to read the cached icon, and return it if it exists
match File::open(path) {
Ok(mut f) => {
let mut buffer = Vec::new();
if f.read_to_end(&mut buffer).is_ok() {
return Ok(buffer);
}
/* If error reading file continue */
}
Err(_) => { /* Continue */ }
}
println!("Downloading icon for {}...", key);
let icon = match get_icon(url) {
Ok(icon) => icon,
Err(_) => return Err(io::Error::new(io::ErrorKind::NotFound, ""))
if let Ok(mut f) = File::create(path) {
f.write_all(icon).expect("Error writing icon file");
};
// Save the currently downloaded icon
match File::create(path) {
Ok(mut f) => { f.write_all(&icon).expect("Error writing icon file"); }
Err(_) => { /* Continue */ }
};
Ok(icon)
}
const FALLBACK_ICON_URL: &str = "https://raw.githubusercontent.com/bitwarden/web/master/src/images/fa-globe.png";
fn get_fallback_icon() -> Vec<u8> {
let fallback_icon = "https://raw.githubusercontent.com/bitwarden/web/master/src/images/fa-globe.png";
get_icon_cached("default", fallback_icon).unwrap()
let path = format!("{}/default.png", CONFIG.icon_cache_folder);
if let Some(icon) = get_cached_icon(&path) {
return icon;
}
match download_icon(FALLBACK_ICON_URL) {
Ok(icon) => {
save_icon(&path, &icon);
icon
},
Err(e) => {
println!("Error downloading fallback icon: {:?}", e);
vec![]
}
}
}

View File

@@ -1,40 +1,44 @@
use std::collections::HashMap;
use std::net::{IpAddr, Ipv4Addr, SocketAddr};
use rocket::{Route, Outcome};
use rocket::request::{self, Request, FromRequest, Form, FormItems, FromForm};
use rocket::request::{self, Form, FormItems, FromForm, FromRequest, Request};
use rocket::{Outcome, Route};
use rocket_contrib::{Json, Value};
use db::DbConn;
use num_traits::FromPrimitive;
use db::models::*;
use db::DbConn;
use util;
use util::{self, JsonMap};
use api::JsonResult;
use api::{ApiResult, JsonResult};
use CONFIG;
pub fn routes() -> Vec<Route> {
routes![ login]
routes![login]
}
#[post("/connect/token", data = "<connect_data>")]
fn login(connect_data: Form<ConnectData>, device_type: DeviceType, conn: DbConn) -> JsonResult {
fn login(connect_data: Form<ConnectData>, device_type: DeviceType, conn: DbConn, socket: Option<SocketAddr>) -> JsonResult {
let data = connect_data.get();
println!("{:#?}", data);
match data.grant_type {
GrantType::RefreshToken =>_refresh_login(data, device_type, conn),
GrantType::Password => _password_login(data, device_type, conn)
GrantType::RefreshToken => _refresh_login(data, device_type, conn),
GrantType::Password => _password_login(data, device_type, conn, socket),
}
}
fn _refresh_login(data: &ConnectData, _device_type: DeviceType, conn: DbConn) -> JsonResult {
// Extract token
let token = data.get("refresh_token").unwrap();
let token = data.get("refresh_token");
// Get device by refresh token
let mut device = match Device::find_by_refresh_token(token, &conn) {
Some(device) => device,
None => err!("Invalid refresh token")
None => err!("Invalid refresh token"),
};
// COMMON
@@ -54,37 +58,48 @@ fn _refresh_login(data: &ConnectData, _device_type: DeviceType, conn: DbConn) ->
})))
}
fn _password_login(data: &ConnectData, device_type: DeviceType, conn: DbConn) -> JsonResult {
fn _password_login(data: &ConnectData, device_type: DeviceType, conn: DbConn, remote: Option<SocketAddr>) -> JsonResult {
// Get the ip for error reporting
let ip = match remote {
Some(ip) => ip.ip(),
None => IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)),
};
// Validate scope
let scope = data.get("scope").unwrap();
let scope = data.get("scope");
if scope != "api offline_access" {
err!("Scope not supported")
}
// Get the user
let username = data.get("username").unwrap();
let username = data.get("username");
let user = match User::find_by_mail(username, &conn) {
Some(user) => user,
None => err!("Username or password is incorrect. Try again.")
None => err!(format!(
"Username or password is incorrect. Try again. IP: {}. Username: {}.",
ip, username
)),
};
// Check password
let password = data.get("password").unwrap();
let password = data.get("password");
if !user.check_valid_password(password) {
err!("Username or password is incorrect. Try again.")
err!(format!(
"Username or password is incorrect. Try again. IP: {}. Username: {}.",
ip, username
))
}
// Let's only use the header and ignore the 'devicetype' parameter
let device_type_num = device_type.0;
let (device_id, device_name) = match data.is_device {
false => { (format!("web-{}", user.uuid), String::from("web")) }
true => {
(
data.get("deviceidentifier").unwrap().clone(),
data.get("devicename").unwrap().clone(),
)
}
let (device_id, device_name) = if data.is_device {
(
data.get("deviceidentifier").clone(),
data.get("devicename").clone(),
)
} else {
(format!("web-{}", user.uuid), String::from("web"))
};
// Find device or create new
@@ -104,42 +119,7 @@ fn _password_login(data: &ConnectData, device_type: DeviceType, conn: DbConn) ->
}
};
let twofactor_token = if user.requires_twofactor() {
let twofactor_provider = util::parse_option_string(data.get("twoFactorProvider")).unwrap_or(0);
let twofactor_code = match data.get("twoFactorToken") {
Some(code) => code,
None => err_json!(_json_err_twofactor())
};
match twofactor_provider {
0 /* TOTP */ => {
let totp_code: u64 = match twofactor_code.parse() {
Ok(code) => code,
Err(_) => err!("Invalid Totp code")
};
if !user.check_totp_code(totp_code) {
err_json!(_json_err_twofactor())
}
if util::parse_option_string(data.get("twoFactorRemember")).unwrap_or(0) == 1 {
device.refresh_twofactor_remember();
device.twofactor_remember.clone()
} else {
device.delete_twofactor_remember();
None
}
},
5 /* Remember */ => {
match device.twofactor_remember {
Some(ref remember) if remember == twofactor_code => (),
_ => err_json!(_json_err_twofactor())
};
None // No twofactor token needed here
},
_ => err!("Invalid two factor provider"),
}
} else { None }; // No twofactor token if twofactor is disabled
let twofactor_token = twofactor_auth(&user.uuid, &data, &mut device, &conn)?;
// Common
let user = User::find_by_uuid(&device.user_uuid, &conn).unwrap();
@@ -165,48 +145,127 @@ fn _password_login(data: &ConnectData, device_type: DeviceType, conn: DbConn) ->
Ok(Json(result))
}
fn _json_err_twofactor() -> Value {
json!({
fn twofactor_auth(
user_uuid: &str,
data: &ConnectData,
device: &mut Device,
conn: &DbConn,
) -> ApiResult<Option<String>> {
let twofactors_raw = TwoFactor::find_by_user(user_uuid, conn);
// Remove u2f challenge twofactors (impl detail)
let twofactors: Vec<_> = twofactors_raw.iter().filter(|tf| tf.type_ < 1000).collect();
let providers: Vec<_> = twofactors.iter().map(|tf| tf.type_).collect();
// No twofactor token if twofactor is disabled
if twofactors.is_empty() {
return Ok(None);
}
let provider = match util::try_parse_string(data.get_opt("twoFactorProvider")) {
Some(provider) => provider,
None => providers[0], // If we aren't given a two factor provider, assume the first one
};
let twofactor_code = match data.get_opt("twoFactorToken") {
Some(code) => code,
None => err_json!(_json_err_twofactor(&providers, user_uuid, conn)?),
};
let twofactor = twofactors.iter().filter(|tf| tf.type_ == provider).nth(0);
match TwoFactorType::from_i32(provider) {
Some(TwoFactorType::Remember) => {
match &device.twofactor_remember {
Some(remember) if remember == twofactor_code => return Ok(None), // No twofactor token needed here
_ => err_json!(_json_err_twofactor(&providers, user_uuid, conn)?),
}
}
Some(TwoFactorType::Authenticator) => {
let twofactor = match twofactor {
Some(tf) => tf,
None => err!("TOTP not enabled"),
};
let totp_code: u64 = match twofactor_code.parse() {
Ok(code) => code,
_ => err!("Invalid TOTP code"),
};
if !twofactor.check_totp_code(totp_code) {
err_json!(_json_err_twofactor(&providers, user_uuid, conn)?)
}
}
Some(TwoFactorType::U2f) => {
use api::core::two_factor;
two_factor::validate_u2f_login(user_uuid, twofactor_code, conn)?;
}
_ => err!("Invalid two factor provider"),
}
if util::try_parse_string_or(data.get_opt("twoFactorRemember"), 0) == 1 {
Ok(Some(device.refresh_twofactor_remember()))
} else {
device.delete_twofactor_remember();
Ok(None)
}
}
fn _json_err_twofactor(providers: &[i32], user_uuid: &str, conn: &DbConn) -> ApiResult<Value> {
use api::core::two_factor;
let mut result = json!({
"error" : "invalid_grant",
"error_description" : "Two factor required.",
"TwoFactorProviders" : [ 0 ],
"TwoFactorProviders2" : { "0" : null }
})
}
"TwoFactorProviders" : providers,
"TwoFactorProviders2" : {} // { "0" : null }
});
/*
ConnectData {
grant_type: Password,
is_device: false,
data: {
"scope": "api offline_access",
"client_id": "web",
"grant_type": "password",
"username": "dani@mail",
"password": "8IuV1sJ94tPjyYIK+E+PTjblzjm4W6C4N5wqM0KKsSg="
for provider in providers {
result["TwoFactorProviders2"][provider.to_string()] = Value::Null;
match TwoFactorType::from_i32(*provider) {
Some(TwoFactorType::Authenticator) => { /* Nothing to do for TOTP */ }
Some(TwoFactorType::U2f) if CONFIG.domain_set => {
let request = two_factor::generate_u2f_login(user_uuid, conn)?;
let mut challenge_list = Vec::new();
for key in request.registered_keys {
let mut challenge_map = JsonMap::new();
challenge_map.insert("appId".into(), Value::String(request.app_id.clone()));
challenge_map
.insert("challenge".into(), Value::String(request.challenge.clone()));
challenge_map.insert("version".into(), Value::String(key.version));
challenge_map.insert(
"keyHandle".into(),
Value::String(key.key_handle.unwrap_or_default()),
);
challenge_list.push(Value::Object(challenge_map));
}
let mut map = JsonMap::new();
use serde_json;
let challenge_list_str = serde_json::to_string(&challenge_list).unwrap();
map.insert("Challenges".into(), Value::String(challenge_list_str));
result["TwoFactorProviders2"][provider.to_string()] = Value::Object(map);
}
_ => {}
}
}
Ok(result)
}
RETURNS "TwoFactorToken": "11122233333444555666777888999"
Next login
ConnectData {
grant_type: Password,
is_device: false,
data: {
"scope": "api offline_access",
"username": "dani@mail",
"client_id": "web",
"twofactorprovider": "5",
"twofactortoken": "11122233333444555666777888999",
"grant_type": "password",
"twofactorremember": "0",
"password": "8IuV1sJ94tPjyYIK+E+PTjblzjm4W6C4N5wqM0KKsSg="
}
}
*/
#[derive(Clone, Copy)]
struct DeviceType(i32);
impl<'a, 'r> FromRequest<'a, 'r> for DeviceType {
@@ -215,13 +274,12 @@ impl<'a, 'r> FromRequest<'a, 'r> for DeviceType {
fn from_request(request: &'a Request<'r>) -> request::Outcome<Self, Self::Error> {
let headers = request.headers();
let type_opt = headers.get_one("Device-Type");
let type_num = util::parse_option_string(type_opt).unwrap_or(0);
let type_num = util::try_parse_string_or(type_opt, 0);
Outcome::Success(DeviceType(type_num))
}
}
#[derive(Debug)]
struct ConnectData {
grant_type: GrantType,
@@ -230,10 +288,17 @@ struct ConnectData {
}
#[derive(Debug, Copy, Clone)]
enum GrantType { RefreshToken, Password }
enum GrantType {
RefreshToken,
Password,
}
impl ConnectData {
fn get(&self, key: &str) -> Option<&String> {
fn get(&self, key: &str) -> &String {
&self.data[&key.to_lowercase()]
}
fn get_opt(&self, key: &str) -> Option<&String> {
self.data.get(&key.to_lowercase())
}
}
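Note the behavioral shift above: get() now indexes the map directly and would panic on a missing key, while get_opt() keeps the old Option behavior. That is safe only because check_values (below) has already verified the required fields for each grant type before get() is ever called.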
@@ -252,30 +317,33 @@ impl<'f> FromForm<'f> for ConnectData {
for (key, value) in items {
match (key.url_decode(), value.url_decode()) {
(Ok(key), Ok(value)) => data.insert(key.to_lowercase(), value),
_ => return Err(format!("Error decoding key or value")),
_ => return Err("Error decoding key or value".to_string()),
};
}
// Validate needed values
let (grant_type, is_device) =
match data.get("grant_type").map(String::as_ref) {
Some("refresh_token") => {
check_values(&data, &VALUES_REFRESH)?;
(GrantType::RefreshToken, false) // Device doesn't matter here
}
Some("password") => {
check_values(&data, &VALUES_PASSWORD)?;
let (grant_type, is_device) = match data.get("grant_type").map(String::as_ref) {
Some("refresh_token") => {
check_values(&data, &VALUES_REFRESH)?;
(GrantType::RefreshToken, false) // Device doesn't matter here
}
Some("password") => {
check_values(&data, &VALUES_PASSWORD)?;
let is_device = match data.get("client_id").unwrap().as_ref() {
"browser" | "mobile" => check_values(&data, &VALUES_DEVICE)?,
_ => false
};
(GrantType::Password, is_device)
}
_ => return Err(format!("Grant type not supported"))
};
let is_device = match data["client_id"].as_ref() {
"browser" | "mobile" => check_values(&data, &VALUES_DEVICE)?,
_ => false,
};
(GrantType::Password, is_device)
}
_ => return Err("Grant type not supported".to_string()),
};
Ok(ConnectData { grant_type, is_device, data })
Ok(ConnectData {
grant_type,
is_device,
data,
})
}
}

View File

@@ -1,19 +1,23 @@
mod core;
pub(crate) mod core;
mod icons;
mod identity;
mod web;
mod notifications;
pub use self::core::routes as core_routes;
pub use self::icons::routes as icons_routes;
pub use self::identity::routes as identity_routes;
pub use self::web::routes as web_routes;
pub use self::notifications::routes as notifications_routes;
pub use self::notifications::{start_notification_server, WebSocketUsers, UpdateType};
use rocket::response::status::BadRequest;
use rocket_contrib::Json;
// Type aliases for API methods results
type JsonResult = Result<Json, BadRequest<Json>>;
type EmptyResult = Result<(), BadRequest<Json>>;
type ApiResult<T> = Result<T, BadRequest<Json>>;
type JsonResult = ApiResult<Json>;
type EmptyResult = ApiResult<()>;
use util;
type JsonUpcase<T> = Json<util::UpCase<T>>;
@@ -25,7 +29,7 @@ struct PasswordData {
MasterPasswordHash: String
}
#[derive(Deserialize, Debug)]
#[derive(Deserialize, Debug, Clone)]
#[serde(untagged)]
enum NumberOrString {
Number(i32),
@@ -33,14 +37,14 @@ enum NumberOrString {
}
impl NumberOrString {
fn to_string(self) -> String {
fn into_string(self) -> String {
match self {
NumberOrString::Number(n) => n.to_string(),
NumberOrString::String(s) => s
}
}
fn to_i32(self) -> Option<i32> {
fn into_i32(self) -> Option<i32> {
match self {
NumberOrString::Number(n) => Some(n),
NumberOrString::String(s) => s.parse().ok()

src/api/notifications.rs Normal file
View File

@@ -0,0 +1,366 @@
use rocket::Route;
use rocket_contrib::Json;
use api::JsonResult;
use auth::Headers;
use db::DbConn;
use CONFIG;
pub fn routes() -> Vec<Route> {
routes![negotiate, websockets_err]
}
#[get("/hub")]
fn websockets_err() -> JsonResult {
err!("'/notifications/hub' should be proxied towards the websocket server, otherwise notifications will not work. Go to the README for more info.")
}
#[post("/hub/negotiate")]
fn negotiate(_headers: Headers, _conn: DbConn) -> JsonResult {
use crypto;
use data_encoding::BASE64URL;
let conn_id = BASE64URL.encode(&crypto::get_random(vec![0u8; 16]));
// TODO: Implement transports
// Rocket WS support: https://github.com/SergioBenitez/Rocket/issues/90
// Rocket SSE support: https://github.com/SergioBenitez/Rocket/issues/33
Ok(Json(json!({
"connectionId": conn_id,
"availableTransports":[
{"transport":"WebSockets", "transferFormats":["Text","Binary"]},
// {"transport":"ServerSentEvents", "transferFormats":["Text"]},
// {"transport":"LongPolling", "transferFormats":["Text","Binary"]}
]
})))
}
///
/// Websockets server
///
use std::sync::Arc;
use std::thread;
use ws::{self, util::Token, Factory, Handler, Handshake, Message, Sender, WebSocket};
use chashmap::CHashMap;
use chrono::NaiveDateTime;
use serde_json::from_str;
use db::models::{Cipher, Folder, User};
use rmpv::Value;
fn serialize(val: Value) -> Vec<u8> {
use rmpv::encode::write_value;
let mut buf = Vec::new();
write_value(&mut buf, &val).expect("Error encoding MsgPack");
// Add size bytes at the start
// Extracted from BinaryMessageFormat.js
let mut size: usize = buf.len();
let mut len_buf: Vec<u8> = Vec::new();
loop {
let mut size_part = size & 0x7f;
size >>= 7;
if size > 0 {
size_part |= 0x80;
}
len_buf.push(size_part as u8);
if size == 0 {
break;
}
}
len_buf.append(&mut buf);
len_buf
}
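The prefix built above is SignalR's binary-message length header: 7 bits of the size per byte, least-significant group first, with the high bit flagging continuation. A standalone check (assumed test) of the same loop:

fn length_prefix(mut size: usize) -> Vec<u8> {
    let mut out = Vec::new();
    loop {
        let mut part = size & 0x7f;
        size >>= 7;
        if size > 0 {
            part |= 0x80; // more bytes follow
        }
        out.push(part as u8);
        if size == 0 {
            break;
        }
    }
    out
}

fn main() {
    assert_eq!(length_prefix(5), vec![0x05]);
    // 300 = 0b10_0101100 -> low 7 bits 0x2C with continuation bit = 0xAC, then 0x02
    assert_eq!(length_prefix(300), vec![0xAC, 0x02]);
}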
fn serialize_date(date: NaiveDateTime) -> Value {
let seconds: i64 = date.timestamp();
let nanos: i64 = date.timestamp_subsec_nanos() as i64;
let timestamp = nanos << 34 | seconds;
use byteorder::{BigEndian, WriteBytesExt};
let mut bs = [0u8; 8];
bs.as_mut()
.write_i64::<BigEndian>(timestamp)
.expect("Unable to write");
// -1 is Timestamp
// https://github.com/msgpack/msgpack/blob/master/spec.md#timestamp-extension-type
Value::Ext(-1, bs.to_vec())
}
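This is msgpack's timestamp 64 layout from the linked spec: the upper 30 bits carry nanoseconds, the lower 34 bits carry seconds. A quick worked check (assumed values):

fn main() {
    let seconds: i64 = 1_537_000_000; // mid-September 2018
    let nanos: i64 = 500_000_000;     // half a second
    let timestamp = nanos << 34 | seconds;
    assert_eq!(timestamp & ((1 << 34) - 1), seconds); // low 34 bits
    assert_eq!(timestamp >> 34, nanos);               // high 30 bits
}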
fn convert_option<T: Into<Value>>(option: Option<T>) -> Value {
match option {
Some(a) => a.into(),
None => Value::Nil,
}
}
// Server WebSocket handler
pub struct WSHandler {
out: Sender,
user_uuid: Option<String>,
users: WebSocketUsers,
}
const RECORD_SEPARATOR: u8 = 0x1e;
const INITIAL_RESPONSE: [u8; 3] = [0x7b, 0x7d, RECORD_SEPARATOR]; // {, }, <RS>
#[derive(Deserialize)]
struct InitialMessage {
protocol: String,
version: i32,
}
const PING_MS: u64 = 15_000;
const PING: Token = Token(1);
impl Handler for WSHandler {
fn on_open(&mut self, hs: Handshake) -> ws::Result<()> {
// TODO: Improve this split
let path = hs.request.resource();
let mut query_split: Vec<_> = path.split('?').nth(1).unwrap().split('&').collect();
query_split.sort();
let access_token = &query_split[0][13..];
let _id = &query_split[1][3..];
// Validate the user
use auth;
let claims = match auth::decode_jwt(access_token) {
Ok(claims) => claims,
Err(_) => {
return Err(ws::Error::new(
ws::ErrorKind::Internal,
"Invalid access token provided",
))
}
};
// Assign the user to the handler
let user_uuid = claims.sub;
self.user_uuid = Some(user_uuid.clone());
// Add the current Sender to the user list
let handler_insert = self.out.clone();
let handler_update = self.out.clone();
self.users.map.upsert(
user_uuid,
|| vec![handler_insert],
|ref mut v| v.push(handler_update),
);
// Schedule a ping to keep the connection alive
self.out.timeout(PING_MS, PING)
}
fn on_message(&mut self, msg: Message) -> ws::Result<()> {
println!("Server got message '{}'. ", msg);
if let Message::Text(text) = msg.clone() {
let json = &text[..text.len() - 1]; // Remove last char
if let Ok(InitialMessage { protocol, version }) = from_str::<InitialMessage>(json) {
if &protocol == "messagepack" && version == 1 {
return self.out.send(&INITIAL_RESPONSE[..]); // Respond to initial message
}
}
}
// If it's not the initial message, just echo the message
self.out.send(msg)
}
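The handshake on_message implements is SignalR's: the client's first frame is a JSON object terminated by the 0x1e record separator, and the server acknowledges with an empty object plus the same separator (INITIAL_RESPONSE above is literally the bytes of "{}" followed by <RS>):

client -> {"protocol":"messagepack","version":1}<RS>
server -> {}<RS>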
fn on_timeout(&mut self, event: Token) -> ws::Result<()> {
if event == PING {
// send ping
self.out.send(create_ping())?;
// reschedule the timeout
self.out.timeout(PING_MS, PING)
} else {
Err(ws::Error::new(
ws::ErrorKind::Internal,
"Invalid timeout token provided",
))
}
}
}
struct WSFactory {
pub users: WebSocketUsers,
}
impl WSFactory {
pub fn init() -> Self {
WSFactory {
users: WebSocketUsers {
map: Arc::new(CHashMap::new()),
},
}
}
}
impl Factory for WSFactory {
type Handler = WSHandler;
fn connection_made(&mut self, out: Sender) -> Self::Handler {
println!("WS: Connection made");
WSHandler {
out,
user_uuid: None,
users: self.users.clone(),
}
}
fn connection_lost(&mut self, handler: Self::Handler) {
println!("WS: Connection lost");
// Remove handler
let user_uuid = &handler.user_uuid.unwrap();
if let Some(mut user_conn) = self.users.map.get_mut(user_uuid) {
user_conn.remove_item(&handler.out);
}
}
}
#[derive(Clone)]
pub struct WebSocketUsers {
pub map: Arc<CHashMap<String, Vec<Sender>>>,
}
impl WebSocketUsers {
fn send_update(&self, user_uuid: &String, data: Vec<u8>) -> ws::Result<()> {
if let Some(user) = self.map.get(user_uuid) {
for sender in user.iter() {
sender.send(data.clone())?;
}
}
Ok(())
}
// NOTE: The last modified date needs to be updated before calling these methods
pub fn send_user_update(&self, ut: UpdateType, user: &User) {
let data = create_update(
vec![
("UserId".into(), user.uuid.clone().into()),
("Date".into(), serialize_date(user.updated_at)),
],
ut,
);
self.send_update(&user.uuid.clone(), data).ok();
}
pub fn send_folder_update(&self, ut: UpdateType, folder: &Folder) {
let data = create_update(
vec![
("Id".into(), folder.uuid.clone().into()),
("UserId".into(), folder.user_uuid.clone().into()),
("RevisionDate".into(), serialize_date(folder.updated_at)),
],
ut,
);
self.send_update(&folder.user_uuid, data).ok();
}
pub fn send_cipher_update(&self, ut: UpdateType, cipher: &Cipher, user_uuids: &Vec<String>) {
let user_uuid = convert_option(cipher.user_uuid.clone());
let org_uuid = convert_option(cipher.organization_uuid.clone());
let data = create_update(
vec![
("Id".into(), cipher.uuid.clone().into()),
("UserId".into(), user_uuid),
("OrganizationId".into(), org_uuid),
("CollectionIds".into(), Value::Nil),
("RevisionDate".into(), serialize_date(cipher.updated_at)),
],
ut,
);
for uuid in user_uuids {
self.send_update(&uuid, data.clone()).ok();
}
}
}
/* Message Structure
[
1, // MessageType.Invocation
{}, // Headers
null, // InvocationId
"ReceiveMessage", // Target
[ // Arguments
{
"ContextId": "app_id",
"Type": ut as i32,
"Payload": {}
}
]
]
*/
fn create_update(payload: Vec<(Value, Value)>, ut: UpdateType) -> Vec<u8> {
use rmpv::Value as V;
let value = V::Array(vec![
1.into(),
V::Array(vec![]),
V::Nil,
"ReceiveMessage".into(),
V::Array(vec![V::Map(vec![
("ContextId".into(), "app_id".into()),
("Type".into(), (ut as i32).into()),
("Payload".into(), payload.into()),
])]),
]);
serialize(value)
}
fn create_ping() -> Vec<u8> {
serialize(Value::Array(vec![6.into()]))
}
#[allow(dead_code)]
pub enum UpdateType {
SyncCipherUpdate = 0,
SyncCipherCreate = 1,
SyncLoginDelete = 2,
SyncFolderDelete = 3,
SyncCiphers = 4,
SyncVault = 5,
SyncOrgKeys = 6,
SyncFolderCreate = 7,
SyncFolderUpdate = 8,
SyncCipherDelete = 9,
SyncSettings = 10,
LogOut = 11,
}
pub fn start_notification_server() -> WebSocketUsers {
let factory = WSFactory::init();
let users = factory.users.clone();
thread::spawn(move || {
WebSocket::new(factory)
.unwrap()
.listen(format!("0.0.0.0:{}", CONFIG.websocket_port))
.unwrap();
});
users
}
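
A note on the handshake framing above: it follows the SignalR convention, where the client's first frame is {"protocol":"messagepack","version":1} terminated by the 0x1e record separator, and the server answers with an empty JSON object plus the same separator (INITIAL_RESPONSE). A minimal sketch of that framing, independent of the ws crate and assuming serde (with derive) and serde_json are available:

use serde::Deserialize;

const RECORD_SEPARATOR: u8 = 0x1e;

#[derive(Deserialize)]
struct InitialMessage {
    protocol: String,
    version: i32,
}

// Strip the trailing record separator, then parse the JSON payload.
fn parse_handshake(frame: &[u8]) -> Option<InitialMessage> {
    let (last, payload) = frame.split_last()?;
    if *last != RECORD_SEPARATOR {
        return None;
    }
    serde_json::from_slice(payload).ok()
}

fn main() {
    let frame = b"{\"protocol\":\"messagepack\",\"version\":1}\x1e";
    let msg = parse_handshake(frame).expect("valid handshake");
    assert!(msg.protocol == "messagepack" && msg.version == 1);
    // The server's canned reply is "{}" followed by the same separator:
    assert_eq!(&[0x7b, 0x7d, RECORD_SEPARATOR], b"{}\x1e");
}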

View File

@@ -1,39 +1,73 @@
use std::io;
use std::path::{Path, PathBuf};
use rocket::request::Request;
use rocket::response::{self, NamedFile, Responder};
use rocket::response::content::Content;
use rocket::http::{ContentType, Status};
use rocket::Route;
use rocket::response::NamedFile;
use rocket_contrib::Json;
use rocket_contrib::{Json, Value};
use CONFIG;
pub fn routes() -> Vec<Route> {
routes![index, files, attachments, alive]
if CONFIG.web_vault_enabled {
routes![web_index, app_id, web_files, attachments, alive]
} else {
routes![attachments, alive]
}
}
// TODO: Might want to use in memory cache: https://github.com/hgzimmerman/rocket-file-cache
#[get("/")]
fn index() -> io::Result<NamedFile> {
NamedFile::open(
Path::new(&CONFIG.web_vault_folder)
.join("index.html"))
fn web_index() -> WebHeaders<io::Result<NamedFile>> {
web_files("index.html".into())
}
#[get("/app-id.json")]
fn app_id() -> WebHeaders<Content<Json<Value>>> {
let content_type = ContentType::new("application", "fido.trusted-apps+json");
WebHeaders(Content(content_type, Json(json!({
"trustedFacets": [
{
"version": { "major": 1, "minor": 0 },
"ids": [
&CONFIG.domain,
"ios:bundle-id:com.8bit.bitwarden",
"android:apk-key-hash:dUGFzUzf3lmHSLBDBIv+WaFyZMI" ]
}]
}))))
}
#[get("/<p..>", rank = 1)] // Only match this if the other routes don't match
fn files(p: PathBuf) -> io::Result<NamedFile> {
NamedFile::open(
Path::new(&CONFIG.web_vault_folder)
.join(p))
fn web_files(p: PathBuf) -> WebHeaders<io::Result<NamedFile>> {
WebHeaders(NamedFile::open(Path::new(&CONFIG.web_vault_folder).join(p)))
}
struct WebHeaders<R>(R);
impl<'r, R: Responder<'r>> Responder<'r> for WebHeaders<R> {
fn respond_to(self, req: &Request) -> response::Result<'r> {
match self.0.respond_to(req) {
Ok(mut res) => {
res.set_raw_header("Referrer-Policy", "same-origin");
res.set_raw_header("X-Frame-Options", "SAMEORIGIN");
res.set_raw_header("X-Content-Type-Options", "nosniff");
res.set_raw_header("X-XSS-Protection", "1; mode=block");
Ok(res)
},
Err(_) => {
Err(Status::NotFound)
}
}
}
}
#[get("/attachments/<uuid>/<file..>")]
fn attachments(uuid: String, file: PathBuf) -> io::Result<NamedFile> {
NamedFile::open(
Path::new(&CONFIG.attachments_folder)
.join(uuid)
.join(file)
)
NamedFile::open(Path::new(&CONFIG.attachments_folder).join(uuid).join(file))
}
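
app-id.json is the FIDO U2F trusted-facets document, served with the application/fido.trusted-apps+json content type; clients fetch it to decide which origins may use registered security keys. A sketch building the same payload with serde_json (the domain argument is a placeholder value, not one from this repository):

use serde_json::{json, Value};

fn app_id_json(domain: &str) -> Value {
    json!({
        "trustedFacets": [{
            "version": { "major": 1, "minor": 0 },
            "ids": [
                domain,
                "ios:bundle-id:com.8bit.bitwarden",
                "android:apk-key-hash:dUGFzUzf3lmHSLBDBIv+WaFyZMI"
            ]
        }]
    })
}

fn main() {
    let doc = app_id_json("https://vault.example.com");
    println!("{}", serde_json::to_string_pretty(&doc).unwrap());
}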

View File

@@ -11,10 +11,11 @@ use serde::ser::Serialize;
use CONFIG;
const JWT_ALGORITHM: jwt::Algorithm = jwt::Algorithm::RS256;
pub const JWT_ISSUER: &'static str = "localhost:8000/identity";
lazy_static! {
pub static ref DEFAULT_VALIDITY: Duration = Duration::hours(2);
pub static ref JWT_ISSUER: String = CONFIG.domain.clone();
static ref JWT_HEADER: jwt::Header = jwt::Header::new(JWT_ALGORITHM);
static ref PRIVATE_RSA_KEY: Vec<u8> = match read_file(&CONFIG.private_rsa_key) {
@@ -30,9 +31,9 @@ lazy_static! {
pub fn encode_jwt<T: Serialize>(claims: &T) -> String {
match jwt::encode(&JWT_HEADER, claims, &PRIVATE_RSA_KEY) {
Ok(token) => return token,
Ok(token) => token,
Err(e) => panic!("Error encoding jwt {}", e)
};
}
}
pub fn decode_jwt(token: &str) -> Result<JWTClaims, String> {
@@ -42,7 +43,7 @@ pub fn decode_jwt(token: &str) -> Result<JWTClaims, String> {
validate_iat: true,
validate_nbf: true,
aud: None,
iss: Some(JWT_ISSUER.into()),
iss: Some(JWT_ISSUER.clone()),
sub: None,
algorithms: vec![JWT_ALGORITHM],
};
@@ -94,7 +95,7 @@ use rocket::Outcome;
use rocket::request::{self, Request, FromRequest};
use db::DbConn;
use db::models::{User, UserOrganization, UserOrgType, Device};
use db::models::{User, UserOrganization, UserOrgType, UserOrgStatus, Device};
pub struct Headers {
pub host: String,
@@ -109,9 +110,31 @@ impl<'a, 'r> FromRequest<'a, 'r> for Headers {
let headers = request.headers();
// Get host
let host = match headers.get_one("Host") {
Some(host) => format!("http://{}", host), // TODO: Check if HTTPS
_ => String::new()
let host = if CONFIG.domain_set {
CONFIG.domain.clone()
} else if let Some(referer) = headers.get_one("Referer") {
referer.to_string()
} else {
// Try to guess from the headers
use std::env;
let protocol = if let Some(proto) = headers.get_one("X-Forwarded-Proto") {
proto
} else if env::var("ROCKET_TLS").is_ok() {
"https"
} else {
"http"
};
let host = if let Some(host) = headers.get_one("X-Forwarded-Host") {
host
} else if let Some(host) = headers.get_one("Host") {
host
} else {
""
};
format!("{}://{}", protocol, host)
};
// Get access_token
@@ -182,7 +205,13 @@ impl<'a, 'r> FromRequest<'a, 'r> for OrgHeaders {
};
let org_user = match UserOrganization::find_by_user_and_org(&headers.user.uuid, &org_id, &conn) {
Some(user) => user,
Some(user) => {
if user.status == UserOrgStatus::Confirmed as i32 {
user
} else {
err_handler!("The current user isn't confirmed member of the organization")
}
}
None => err_handler!("The current user isn't a member of the organization")
};
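
The new host detection prefers an explicitly configured DOMAIN, then the Referer header, and otherwise rebuilds scheme://host from the reverse-proxy headers. A standalone sketch of that fallback chain (plain Options stand in for Rocket's header map; the function name is illustrative):

fn guess_host(
    configured_domain: Option<&str>,
    referer: Option<&str>,
    x_forwarded_proto: Option<&str>,
    rocket_tls_set: bool,
    x_forwarded_host: Option<&str>,
    host_header: Option<&str>,
) -> String {
    if let Some(domain) = configured_domain {
        return domain.to_string();
    }
    if let Some(referer) = referer {
        return referer.to_string();
    }
    let protocol = x_forwarded_proto.unwrap_or(if rocket_tls_set { "https" } else { "http" });
    let host = x_forwarded_host.or(host_header).unwrap_or("");
    format!("{}://{}", protocol, host)
}

fn main() {
    let host = guess_host(None, None, Some("https"), false, Some("vault.example.com"), None);
    assert_eq!(host, "https://vault.example.com");
}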

View File

@@ -53,25 +53,42 @@ use db::schema::attachments;
/// Database methods
impl Attachment {
pub fn save(&self, conn: &DbConn) -> bool {
match diesel::replace_into(attachments::table)
pub fn save(&self, conn: &DbConn) -> QueryResult<()> {
diesel::replace_into(attachments::table)
.values(self)
.execute(&**conn) {
Ok(1) => true, // One row inserted
_ => false,
}
.execute(&**conn)
.and(Ok(()))
}
pub fn delete(self, conn: &DbConn) -> QueryResult<()> {
use util;
use std::{thread, time};
let mut retries = 10;
loop {
match diesel::delete(
attachments::table.filter(
attachments::id.eq(&self.id)
)
).execute(&**conn) {
Ok(_) => break,
Err(err) => {
if retries < 1 {
println!("ERROR: Failed with 10 retries");
return Err(err)
} else {
retries -= 1;
println!("Had to retry! Retries left: {}", retries);
thread::sleep(time::Duration::from_millis(500));
continue
}
}
}
}
util::delete_file(&self.get_file_path());
diesel::delete(
attachments::table.filter(
attachments::id.eq(self.id)
)
).execute(&**conn).and(Ok(()))
Ok(())
}
pub fn delete_all_by_cipher(cipher_uuid: &str, conn: &DbConn) -> QueryResult<()> {
@@ -92,4 +109,10 @@ impl Attachment {
.filter(attachments::cipher_uuid.eq(cipher_uuid))
.load::<Self>(&**conn).expect("Error loading attachments")
}
pub fn find_by_ciphers(cipher_uuids: Vec<String>, conn: &DbConn) -> Vec<Self> {
attachments::table
.filter(attachments::cipher_uuid.eq_any(cipher_uuids))
.load::<Self>(&**conn).expect("Error loading attachments")
}
}
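
The retry loop in Attachment::delete exists because SQLite reports "database is locked" errors under concurrent writers; the code tries up to 10 times with a 500 ms sleep before giving up. The same pattern as a reusable sketch (retry is a hypothetical helper, not part of this codebase):

use std::{thread, time::Duration};

// Retry a fallible operation, sleeping between attempts.
fn retry<T, E>(mut op: impl FnMut() -> Result<T, E>, mut retries: u32, delay: Duration) -> Result<T, E> {
    loop {
        match op() {
            Ok(v) => return Ok(v),
            Err(e) if retries == 0 => return Err(e),
            Err(_) => {
                retries -= 1;
                thread::sleep(delay);
            }
        }
    }
}

fn main() {
    let mut attempts = 0;
    let result: Result<u32, &str> = retry(
        || {
            attempts += 1;
            if attempts < 3 { Err("database is locked") } else { Ok(attempts) }
        },
        10,                       // matches the retry budget above
        Duration::from_millis(1), // the real code sleeps 500 ms
    );
    assert_eq!(result, Ok(3));
}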

View File

@@ -3,7 +3,7 @@ use serde_json::Value as JsonValue;
use uuid::Uuid;
use super::{User, Organization, Attachment, FolderCipher, CollectionCipher, UserOrgType};
use super::{User, Organization, Attachment, FolderCipher, CollectionCipher, UserOrganization, UserOrgType, UserOrgStatus};
#[derive(Debug, Identifiable, Queryable, Insertable, Associations)]
#[table_name = "ciphers"]
@@ -32,6 +32,7 @@ pub struct Cipher {
pub data: String,
pub favorite: bool,
pub password_history: Option<String>,
}
/// Local methods
@@ -55,6 +56,7 @@ impl Cipher {
fields: None,
data: String::new(),
password_history: None,
}
}
}
@@ -77,6 +79,10 @@ impl Cipher {
let fields_json: JsonValue = if let Some(ref fields) = self.fields {
serde_json::from_str(fields).unwrap()
} else { JsonValue::Null };
let password_history_json: JsonValue = if let Some(ref password_history) = self.password_history {
serde_json::from_str(password_history).unwrap()
} else { JsonValue::Null };
let mut data_json: JsonValue = serde_json::from_str(&self.data).unwrap();
@@ -84,7 +90,7 @@ impl Cipher {
// To remove backwards compatibility, just remove this entire section
// and remove the compat code from ciphers::update_cipher_from_data
if self.type_ == 1 && data_json["Uris"].is_array() {
let uri = data_json["Uris"][0]["uri"].clone();
let uri = data_json["Uris"][0]["Uri"].clone();
data_json["Uri"] = uri;
}
// TODO: ******* Backwards compat end **********
@@ -97,7 +103,7 @@ impl Cipher {
"Favorite": self.favorite,
"OrganizationId": self.organization_uuid,
"Attachments": attachments_json,
"OrganizationUseTotp": false,
"OrganizationUseTotp": true,
"CollectionIds": self.get_collections(user_uuid, &conn),
"Name": self.name,
@@ -108,6 +114,8 @@ impl Cipher {
"Object": "cipher",
"Edit": true,
"PasswordHistory": password_history_json,
});
let key = match self.type_ {
@@ -122,7 +130,29 @@ impl Cipher {
json_object
}
pub fn update_users_revision(&self, conn: &DbConn) -> Vec<String> {
let mut user_uuids = Vec::new();
match self.user_uuid {
Some(ref user_uuid) => {
User::update_uuid_revision(&user_uuid, conn);
user_uuids.push(user_uuid.clone())
},
None => { // Belongs to Organization, need to update affected users
if let Some(ref org_uuid) = self.organization_uuid {
UserOrganization::find_by_cipher_and_org(&self.uuid, &org_uuid, conn)
.iter()
.for_each(|user_org| {
User::update_uuid_revision(&user_org.user_uuid, conn);
user_uuids.push(user_org.user_uuid.clone())
});
}
}
};
user_uuids
}
pub fn save(&mut self, conn: &DbConn) -> bool {
self.update_users_revision(conn);
self.updated_at = Utc::now().naive_utc();
match diesel::replace_into(ciphers::table)
@@ -133,14 +163,16 @@ impl Cipher {
}
}
pub fn delete(self, conn: &DbConn) -> QueryResult<()> {
pub fn delete(&self, conn: &DbConn) -> QueryResult<()> {
self.update_users_revision(conn);
FolderCipher::delete_all_by_cipher(&self.uuid, &conn)?;
CollectionCipher::delete_all_by_cipher(&self.uuid, &conn)?;
Attachment::delete_all_by_cipher(&self.uuid, &conn)?;
diesel::delete(
ciphers::table.filter(
ciphers::uuid.eq(self.uuid)
ciphers::uuid.eq(&self.uuid)
)
).execute(&**conn).and(Ok(()))
}
@@ -157,6 +189,7 @@ impl Cipher {
None => {
match folder_uuid {
Some(new_folder) => {
self.update_users_revision(conn);
let folder_cipher = FolderCipher::new(&new_folder, &self.uuid);
folder_cipher.save(&conn).or(Err("Couldn't save folder setting"))
},
@@ -169,6 +202,7 @@ impl Cipher {
if current_folder == new_folder {
Ok(()) //nothing to do
} else {
self.update_users_revision(conn);
match FolderCipher::find_by_folder_and_cipher(&current_folder, &self.uuid, &conn) {
Some(current_folder) => {
current_folder.delete(&conn).or(Err("Failed removing old folder mapping"))
@@ -181,6 +215,7 @@ impl Cipher {
}
},
None => {
self.update_users_revision(conn);
match FolderCipher::find_by_folder_and_cipher(&current_folder, &self.uuid, &conn) {
Some(current_folder) => {
current_folder.delete(&conn).or(Err("Failed removing old folder mapping"))
@@ -266,7 +301,9 @@ impl Cipher {
ciphers::table
.left_join(users_organizations::table.on(
ciphers::organization_uuid.eq(users_organizations::org_uuid.nullable()).and(
users_organizations::user_uuid.eq(user_uuid)
users_organizations::user_uuid.eq(user_uuid).and(
users_organizations::status.eq(UserOrgStatus::Confirmed as i32)
)
)
))
.left_join(ciphers_collections::table)
@@ -325,6 +362,6 @@ impl Cipher {
)
))
.select(ciphers_collections::collection_uuid)
.load::<String>(&**conn).unwrap_or(vec![])
.load::<String>(&**conn).unwrap_or_default()
}
}
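
The update_users_revision calls threaded through this file matter because Bitwarden clients poll the account revision date to decide when to re-sync: any change to a cipher, its folder mapping, or an organization membership must bump updated_at for every affected user. A toy sketch of that bookkeeping (a HashMap stands in for the users table; the names are illustrative):

use std::collections::HashMap;
use std::time::{SystemTime, UNIX_EPOCH};

struct Users {
    revision: HashMap<String, u64>, // user_uuid -> updated_at (unix seconds)
}

impl Users {
    fn update_uuid_revision(&mut self, uuid: &str) {
        let now = SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_secs();
        self.revision.insert(uuid.to_string(), now);
    }
}

fn main() {
    let mut users = Users { revision: HashMap::new() };
    // Editing an organization cipher bumps every member with access to it:
    for member in ["alice-uuid", "bob-uuid"] {
        users.update_uuid_revision(member);
    }
    assert_eq!(users.revision.len(), 2);
}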

View File

@@ -2,7 +2,7 @@ use serde_json::Value as JsonValue;
use uuid::Uuid;
use super::{Organization, UserOrganization, UserOrgType};
use super::{Organization, UserOrganization, UserOrgType, UserOrgStatus};
#[derive(Debug, Identifiable, Queryable, Insertable, Associations)]
#[table_name = "collections"]
@@ -78,13 +78,18 @@ impl Collection {
pub fn find_by_user_uuid(user_uuid: &str, conn: &DbConn) -> Vec<Self> {
let mut all_access_collections = users_organizations::table
.filter(users_organizations::user_uuid.eq(user_uuid))
.filter(users_organizations::status.eq(UserOrgStatus::Confirmed as i32))
.filter(users_organizations::access_all.eq(true))
.inner_join(collections::table.on(collections::org_uuid.eq(users_organizations::org_uuid)))
.select(collections::all_columns)
.load::<Self>(&**conn).expect("Error loading collections");
let mut assigned_collections = users_collections::table.inner_join(collections::table)
.left_join(users_organizations::table.on(
users_collections::user_uuid.eq(users_organizations::user_uuid)
))
.filter(users_collections::user_uuid.eq(user_uuid))
.filter(users_organizations::status.eq(UserOrgStatus::Confirmed as i32))
.select(collections::all_columns)
.load::<Self>(&**conn).expect("Error loading collections");
@@ -180,6 +185,8 @@ impl CollectionUser {
}
pub fn save(user_uuid: &str, collection_uuid: &str, read_only:bool, conn: &DbConn) -> QueryResult<()> {
User::update_uuid_revision(&user_uuid, conn);
diesel::replace_into(users_collections::table)
.values((
users_collections::user_uuid.eq(user_uuid),
@@ -189,6 +196,8 @@ impl CollectionUser {
}
pub fn delete(self, conn: &DbConn) -> QueryResult<()> {
User::update_uuid_revision(&self.user_uuid, conn);
diesel::delete(users_collections::table
.filter(users_collections::user_uuid.eq(&self.user_uuid))
.filter(users_collections::collection_uuid.eq(&self.collection_uuid)))
@@ -211,12 +220,20 @@ impl CollectionUser {
}
pub fn delete_all_by_collection(collection_uuid: &str, conn: &DbConn) -> QueryResult<()> {
CollectionUser::find_by_collection(&collection_uuid, conn)
.iter()
.for_each(|collection| {
User::update_uuid_revision(&collection.user_uuid, conn)
});
diesel::delete(users_collections::table
.filter(users_collections::collection_uuid.eq(collection_uuid))
).execute(&**conn).and(Ok(()))
}
pub fn delete_all_by_user(user_uuid: &str, conn: &DbConn) -> QueryResult<()> {
User::update_uuid_revision(&user_uuid, conn);
diesel::delete(users_collections::table
.filter(users_collections::user_uuid.eq(user_uuid))
).execute(&**conn).and(Ok(()))

View File

@@ -43,11 +43,14 @@ impl Device {
}
}
pub fn refresh_twofactor_remember(&mut self) {
pub fn refresh_twofactor_remember(&mut self) -> String {
use data_encoding::BASE64;
use crypto;
self.twofactor_remember = Some(BASE64.encode(&crypto::get_random(vec![0u8; 180])));
let twofactor_remember = BASE64.encode(&crypto::get_random(vec![0u8; 180]));
self.twofactor_remember = Some(twofactor_remember.clone());
twofactor_remember
}
pub fn delete_twofactor_remember(&mut self) {
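
refresh_twofactor_remember now returns the freshly generated token so the login response can hand it straight back to the client. A sketch of just the token generation (rand stands in for the crate-local crypto::get_random; 180 random bytes, BASE64-encoded, as above):

use data_encoding::BASE64;
use rand::RngCore;

fn fresh_remember_token() -> String {
    let mut bytes = vec![0u8; 180];
    rand::thread_rng().fill_bytes(&mut bytes); // crypto::get_random in the real code
    BASE64.encode(&bytes)
}

fn main() {
    let token = fresh_remember_token();
    assert_eq!(token.len(), 240); // 180 bytes encode to exactly 240 base64 chars
}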

View File

@@ -71,6 +71,7 @@ use db::schema::{folders, folders_ciphers};
/// Database methods
impl Folder {
pub fn save(&mut self, conn: &DbConn) -> bool {
User::update_uuid_revision(&self.user_uuid, conn);
self.updated_at = Utc::now().naive_utc();
match diesel::replace_into(folders::table)
@@ -81,12 +82,13 @@ impl Folder {
}
}
pub fn delete(self, conn: &DbConn) -> QueryResult<()> {
pub fn delete(&self, conn: &DbConn) -> QueryResult<()> {
User::update_uuid_revision(&self.user_uuid, conn);
FolderCipher::delete_all_by_folder(&self.uuid, &conn)?;
diesel::delete(
folders::table.filter(
folders::uuid.eq(self.uuid)
folders::uuid.eq(&self.uuid)
)
).execute(&**conn).and(Ok(()))
}

View File

@@ -6,12 +6,14 @@ mod user;
mod collection;
mod organization;
mod two_factor;
pub use self::attachment::Attachment;
pub use self::cipher::Cipher;
pub use self::device::Device;
pub use self::folder::{Folder, FolderCipher};
pub use self::user::User;
pub use self::user::{User, Invitation};
pub use self::organization::Organization;
pub use self::organization::{UserOrganization, UserOrgStatus, UserOrgType};
pub use self::collection::{Collection, CollectionUser, CollectionCipher};
pub use self::two_factor::{TwoFactor, TwoFactorType};

View File

@@ -1,6 +1,7 @@
use serde_json::Value as JsonValue;
use uuid::Uuid;
use super::{User, CollectionUser};
#[derive(Debug, Identifiable, Queryable, Insertable)]
#[table_name = "organizations"]
@@ -26,7 +27,7 @@ pub struct UserOrganization {
}
pub enum UserOrgStatus {
_Invited = 0, // Unused, users are accepted automatically
Invited = 0,
Accepted = 1,
Confirmed = 2,
}
@@ -66,11 +67,11 @@ impl Organization {
"Seats": 10,
"MaxCollections": 10,
"Use2fa": false,
"Use2fa": true,
"UseDirectory": false,
"UseEvents": false,
"UseGroups": false,
"UseTotp": false,
"UseTotp": true,
"BusinessName": null,
"BusinessAddress1": null,
@@ -80,8 +81,8 @@ impl Organization {
"BusinessTaxNumber": null,
"BillingEmail": self.billing_email,
"Plan": "Free",
"PlanType": 0, // Free plan
"Plan": "TeamsAnnually",
"PlanType": 5, // TeamsAnnually plan
"Object": "organization",
})
@@ -108,12 +109,17 @@ impl UserOrganization {
use diesel;
use diesel::prelude::*;
use db::DbConn;
use db::schema::organizations;
use db::schema::users_organizations;
use db::schema::{organizations, users_organizations, users_collections, ciphers_collections};
/// Database methods
impl Organization {
pub fn save(&mut self, conn: &DbConn) -> bool {
UserOrganization::find_by_org(&self.uuid, conn)
.iter()
.for_each(|user_org| {
User::update_uuid_revision(&user_org.user_uuid, conn);
});
match diesel::replace_into(organizations::table)
.values(&*self)
.execute(&**conn) {
@@ -153,11 +159,11 @@ impl UserOrganization {
"Seats": 10,
"MaxCollections": 10,
"Use2fa": false,
"Use2fa": true,
"UseDirectory": false,
"UseEvents": false,
"UseGroups": false,
"UseTotp": false,
"UseTotp": true,
"MaxStorageGb": 10, // The value doesn't matter, we don't check server-side
@@ -172,7 +178,6 @@ impl UserOrganization {
}
pub fn to_json_user_details(&self, conn: &DbConn) -> JsonValue {
use super::User;
let user = User::find_by_uuid(&self.user_uuid, conn).unwrap();
json!({
@@ -189,8 +194,7 @@ impl UserOrganization {
})
}
pub fn to_json_collection_user_details(&self, read_only: &bool, conn: &DbConn) -> JsonValue {
use super::User;
pub fn to_json_collection_user_details(&self, read_only: bool, conn: &DbConn) -> JsonValue {
let user = User::find_by_uuid(&self.user_uuid, conn).unwrap();
json!({
@@ -209,7 +213,6 @@ impl UserOrganization {
let coll_uuids = if self.access_all {
vec![] // If we have complete access, no need to fill the array
} else {
use super::CollectionUser;
let collections = CollectionUser::find_by_organization_and_user_uuid(&self.org_uuid, &self.user_uuid, conn);
collections.iter().map(|c| json!({"Id": c.collection_uuid, "ReadOnly": c.read_only})).collect()
};
@@ -228,6 +231,8 @@ impl UserOrganization {
}
pub fn save(&mut self, conn: &DbConn) -> bool {
User::update_uuid_revision(&self.user_uuid, conn);
match diesel::replace_into(users_organizations::table)
.values(&*self)
.execute(&**conn) {
@@ -237,7 +242,7 @@ impl UserOrganization {
}
pub fn delete(self, conn: &DbConn) -> QueryResult<()> {
use super::CollectionUser;
User::update_uuid_revision(&self.user_uuid, conn);
CollectionUser::delete_all_by_user(&self.user_uuid, &conn)?;
@@ -265,10 +270,25 @@ impl UserOrganization {
.first::<Self>(&**conn).ok()
}
pub fn find_by_uuid_and_org(uuid: &str, org_uuid: &str, conn: &DbConn) -> Option<Self> {
users_organizations::table
.filter(users_organizations::uuid.eq(uuid))
.filter(users_organizations::org_uuid.eq(org_uuid))
.first::<Self>(&**conn).ok()
}
pub fn find_by_user(user_uuid: &str, conn: &DbConn) -> Vec<Self> {
users_organizations::table
.filter(users_organizations::user_uuid.eq(user_uuid))
.load::<Self>(&**conn).unwrap_or(vec![])
.filter(users_organizations::status.eq(UserOrgStatus::Confirmed as i32))
.load::<Self>(&**conn).unwrap_or_default()
}
pub fn find_invited_by_user(user_uuid: &str, conn: &DbConn) -> Vec<Self> {
users_organizations::table
.filter(users_organizations::user_uuid.eq(user_uuid))
.filter(users_organizations::status.eq(UserOrgStatus::Invited as i32))
.load::<Self>(&**conn).unwrap_or_default()
}
pub fn find_by_org(org_uuid: &str, conn: &DbConn) -> Vec<Self> {
@@ -290,6 +310,26 @@ impl UserOrganization {
.filter(users_organizations::org_uuid.eq(org_uuid))
.first::<Self>(&**conn).ok()
}
pub fn find_by_cipher_and_org(cipher_uuid: &str, org_uuid: &str, conn: &DbConn) -> Vec<Self> {
users_organizations::table
.filter(users_organizations::org_uuid.eq(org_uuid))
.left_join(users_collections::table.on(
users_collections::user_uuid.eq(users_organizations::user_uuid)
))
.left_join(ciphers_collections::table.on(
ciphers_collections::collection_uuid.eq(users_collections::collection_uuid).and(
ciphers_collections::cipher_uuid.eq(&cipher_uuid)
)
))
.filter(
users_organizations::access_all.eq(true).or( // AccessAll..
ciphers_collections::cipher_uuid.eq(&cipher_uuid) // ..or access to collection with cipher
)
)
.select(users_organizations::all_columns)
.load::<Self>(&**conn).expect("Error loading user organizations")
}
}

src/db/models/two_factor.rs Normal file
View File

@@ -0,0 +1,112 @@
use serde_json::Value as JsonValue;
use uuid::Uuid;
use super::User;
#[derive(Debug, Identifiable, Queryable, Insertable, Associations)]
#[table_name = "twofactor"]
#[belongs_to(User, foreign_key = "user_uuid")]
#[primary_key(uuid)]
pub struct TwoFactor {
pub uuid: String,
pub user_uuid: String,
pub type_: i32,
pub enabled: bool,
pub data: String,
}
#[allow(dead_code)]
#[derive(FromPrimitive, ToPrimitive)]
pub enum TwoFactorType {
Authenticator = 0,
Email = 1,
Duo = 2,
YubiKey = 3,
U2f = 4,
Remember = 5,
OrganizationDuo = 6,
// These are implementation details
U2fRegisterChallenge = 1000,
U2fLoginChallenge = 1001,
}
/// Local methods
impl TwoFactor {
pub fn new(user_uuid: String, type_: TwoFactorType, data: String) -> Self {
Self {
uuid: Uuid::new_v4().to_string(),
user_uuid,
type_: type_ as i32,
enabled: true,
data,
}
}
pub fn check_totp_code(&self, totp_code: u64) -> bool {
let totp_secret = self.data.as_bytes();
use data_encoding::BASE32;
use oath::{totp_raw_now, HashType};
let decoded_secret = match BASE32.decode(totp_secret) {
Ok(s) => s,
Err(_) => return false
};
let generated = totp_raw_now(&decoded_secret, 6, 0, 30, &HashType::SHA1);
generated == totp_code
}
pub fn to_json(&self) -> JsonValue {
json!({
"Enabled": self.enabled,
"Key": "", // This key and value vary
"Object": "twoFactorAuthenticator" // This value varies
})
}
pub fn to_json_list(&self) -> JsonValue {
json!({
"Enabled": self.enabled,
"Type": self.type_,
"Object": "twoFactorProvider"
})
}
}
use diesel;
use diesel::prelude::*;
use db::DbConn;
use db::schema::twofactor;
/// Database methods
impl TwoFactor {
pub fn save(&self, conn: &DbConn) -> QueryResult<usize> {
diesel::replace_into(twofactor::table)
.values(self)
.execute(&**conn)
}
pub fn delete(self, conn: &DbConn) -> QueryResult<usize> {
diesel::delete(
twofactor::table.filter(
twofactor::uuid.eq(self.uuid)
)
).execute(&**conn)
}
pub fn find_by_user(user_uuid: &str, conn: &DbConn) -> Vec<Self> {
twofactor::table
.filter(twofactor::user_uuid.eq(user_uuid))
.load::<Self>(&**conn).expect("Error loading twofactor")
}
pub fn find_by_user_and_type(user_uuid: &str, type_: i32, conn: &DbConn) -> Option<Self> {
twofactor::table
.filter(twofactor::user_uuid.eq(user_uuid))
.filter(twofactor::type_.eq(type_))
.first::<Self>(&**conn).ok()
}
}
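
check_totp_code above is RFC 6238 with the common defaults: the stored data is a BASE32 secret, decoded and fed to a SHA-1 TOTP with 6 digits and a 30-second step, then compared to the submitted code. A minimal sketch of the verification path using the same crates (data_encoding and oath):

use data_encoding::BASE32;
use oath::{totp_raw_now, HashType};

fn check_totp_code(base32_secret: &str, totp_code: u64) -> bool {
    let decoded_secret = match BASE32.decode(base32_secret.as_bytes()) {
        Ok(s) => s,
        Err(_) => return false,
    };
    // 6 digits, no epoch offset, 30-second time step, SHA-1
    totp_raw_now(&decoded_secret, 6, 0, 30, &HashType::SHA1) == totp_code
}

fn main() {
    // "JBSWY3DPEHPK3PXP" is the usual demo secret; the expected code depends
    // on the current time, so this only exercises the call path.
    println!("{}", check_totp_code("JBSWY3DPEHPK3PXP", 123_456));
}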

View File

@@ -27,36 +27,39 @@ pub struct User {
pub private_key: Option<String>,
pub public_key: Option<String>,
pub totp_secret: Option<String>,
#[column_name = "totp_secret"]
_totp_secret: Option<String>,
pub totp_recover: Option<String>,
pub security_stamp: String,
pub equivalent_domains: String,
pub excluded_globals: String,
pub client_kdf_type: i32,
pub client_kdf_iter: i32,
}
/// Local methods
impl User {
pub fn new(mail: String, key: String, password: String) -> Self {
pub const CLIENT_KDF_TYPE_DEFAULT: i32 = 0; // PBKDF2: 0
pub const CLIENT_KDF_ITER_DEFAULT: i32 = 5_000;
pub fn new(mail: String) -> Self {
let now = Utc::now().naive_utc();
let email = mail.to_lowercase();
let iterations = CONFIG.password_iterations;
let salt = crypto::get_random_64();
let password_hash = crypto::hash_password(password.as_bytes(), &salt, iterations as u32);
Self {
uuid: Uuid::new_v4().to_string(),
created_at: now,
updated_at: now,
name: email.clone(),
email,
key,
key: String::new(),
password_hash,
salt,
password_iterations: iterations,
password_hash: Vec::new(),
salt: crypto::get_random_64(),
password_iterations: CONFIG.password_iterations,
security_stamp: Uuid::new_v4().to_string(),
@@ -64,11 +67,14 @@ impl User {
private_key: None,
public_key: None,
totp_secret: None,
_totp_secret: None,
totp_recover: None,
equivalent_domains: "[]".to_string(),
excluded_globals: "[]".to_string(),
client_kdf_type: Self::CLIENT_KDF_TYPE_DEFAULT,
client_kdf_iter: Self::CLIENT_KDF_ITER_DEFAULT,
}
}
@@ -97,43 +103,24 @@ impl User {
pub fn reset_security_stamp(&mut self) {
self.security_stamp = Uuid::new_v4().to_string();
}
pub fn requires_twofactor(&self) -> bool {
self.totp_secret.is_some()
}
pub fn check_totp_code(&self, totp_code: u64) -> bool {
if let Some(ref totp_secret) = self.totp_secret {
// Validate totp
use data_encoding::BASE32;
use oath::{totp_raw_now, HashType};
let decoded_secret = match BASE32.decode(totp_secret.as_bytes()) {
Ok(s) => s,
Err(_) => return false
};
let generated = totp_raw_now(&decoded_secret, 6, 0, 30, &HashType::SHA1);
generated == totp_code
} else {
true
}
}
}
use diesel;
use diesel::prelude::*;
use db::DbConn;
use db::schema::users;
use db::schema::{users, invitations};
/// Database methods
impl User {
pub fn to_json(&self, conn: &DbConn) -> JsonValue {
use super::UserOrganization;
use super::TwoFactor;
let orgs = UserOrganization::find_by_user(&self.uuid, conn);
let orgs_json: Vec<JsonValue> = orgs.iter().map(|c| c.to_json(&conn)).collect();
let twofactor_enabled = !TwoFactor::find_by_user(&self.uuid, conn).is_empty();
json!({
"Id": self.uuid,
"Name": self.name,
@@ -142,7 +129,7 @@ impl User {
"Premium": true,
"MasterPasswordHint": self.password_hint,
"Culture": "en-US",
"TwoFactorEnabled": self.totp_secret.is_some(),
"TwoFactorEnabled": twofactor_enabled,
"Key": self.key,
"PrivateKey": self.private_key,
"SecurityStamp": self.security_stamp,
@@ -172,6 +159,25 @@ impl User {
}
}
pub fn update_uuid_revision(uuid: &str, conn: &DbConn) {
if let Some(mut user) = User::find_by_uuid(&uuid, conn) {
if user.update_revision(conn).is_err(){
println!("Warning: Failed to update revision for {}", user.email);
};
};
}
pub fn update_revision(&mut self, conn: &DbConn) -> QueryResult<()> {
self.updated_at = Utc::now().naive_utc();
diesel::update(
users::table.filter(
users::uuid.eq(&self.uuid)
)
)
.set(users::updated_at.eq(&self.updated_at))
.execute(&**conn).and(Ok(()))
}
pub fn find_by_mail(mail: &str, conn: &DbConn) -> Option<Self> {
let lower_mail = mail.to_lowercase();
users::table
@@ -185,3 +191,47 @@ impl User {
.first::<Self>(&**conn).ok()
}
}
#[derive(Debug, Identifiable, Queryable, Insertable)]
#[table_name = "invitations"]
#[primary_key(email)]
pub struct Invitation {
pub email: String,
}
impl Invitation {
pub fn new(email: String) -> Self {
Self {
email
}
}
pub fn save(&mut self, conn: &DbConn) -> QueryResult<()> {
diesel::replace_into(invitations::table)
.values(&*self)
.execute(&**conn)
.and(Ok(()))
}
pub fn delete(self, conn: &DbConn) -> QueryResult<()> {
diesel::delete(invitations::table.filter(
invitations::email.eq(self.email)))
.execute(&**conn)
.and(Ok(()))
}
pub fn find_by_mail(mail: &str, conn: &DbConn) -> Option<Self> {
let lower_mail = mail.to_lowercase();
invitations::table
.filter(invitations::email.eq(lower_mail))
.first::<Self>(&**conn).ok()
}
pub fn take(mail: &str, conn: &DbConn) -> bool {
CONFIG.invitations_allowed &&
match Self::find_by_mail(mail, &conn) {
Some(invitation) => invitation.delete(&conn).is_ok(),
None => false
}
}
}
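
Invitation::take is the registration gate: signup for an address only succeeds when invitations are allowed and a matching row can be consumed (looked up, then deleted). A sketch of the same consume-on-use idea over an in-memory set (a HashSet stands in for the invitations table):

use std::collections::HashSet;

struct Invitations {
    allowed: bool,
    emails: HashSet<String>,
}

impl Invitations {
    // Returns true and removes the entry if the mail was invited.
    fn take(&mut self, mail: &str) -> bool {
        self.allowed && self.emails.remove(&mail.to_lowercase())
    }
}

fn main() {
    let mut inv = Invitations {
        allowed: true,
        emails: HashSet::from(["user@example.com".to_string()]),
    };
    assert!(inv.take("User@Example.com"));  // lookup is case-insensitive
    assert!(!inv.take("user@example.com")); // consumed on first use
}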

View File

@@ -21,6 +21,7 @@ table! {
fields -> Nullable<Text>,
data -> Text,
favorite -> Bool,
password_history -> Nullable<Text>,
}
}
@@ -71,6 +72,12 @@ table! {
}
}
table! {
invitations (email) {
email -> Text,
}
}
table! {
organizations (uuid) {
uuid -> Text,
@@ -79,6 +86,17 @@ table! {
}
}
table! {
twofactor (uuid) {
uuid -> Text,
user_uuid -> Text,
#[sql_name = "type"]
type_ -> Integer,
enabled -> Bool,
data -> Text,
}
}
table! {
users (uuid) {
uuid -> Text,
@@ -98,6 +116,8 @@ table! {
security_stamp -> Text,
equivalent_domains -> Text,
excluded_globals -> Text,
client_kdf_type -> Integer,
client_kdf_iter -> Integer,
}
}
@@ -132,6 +152,7 @@ joinable!(devices -> users (user_uuid));
joinable!(folders -> users (user_uuid));
joinable!(folders_ciphers -> ciphers (cipher_uuid));
joinable!(folders_ciphers -> folders (folder_uuid));
joinable!(twofactor -> users (user_uuid));
joinable!(users_collections -> collections (collection_uuid));
joinable!(users_collections -> users (user_uuid));
joinable!(users_organizations -> organizations (org_uuid));
@@ -145,7 +166,9 @@ allow_tables_to_appear_in_same_query!(
devices,
folders,
folders_ciphers,
invitations,
organizations,
twofactor,
users,
users_collections,
users_organizations,

src/mail.rs Normal file
View File

@@ -0,0 +1,63 @@
use std::error::Error;
use native_tls::{Protocol, TlsConnector};
use lettre::{Transport, SmtpTransport, SmtpClient, ClientTlsParameters, ClientSecurity};
use lettre::smtp::ConnectionReuseParameters;
use lettre::smtp::authentication::Credentials;
use lettre_email::EmailBuilder;
use MailConfig;
fn mailer(config: &MailConfig) -> SmtpTransport {
let client_security = if config.smtp_ssl {
let mut tls_builder = TlsConnector::builder();
tls_builder.min_protocol_version(Some(Protocol::Tlsv11));
ClientSecurity::Required(
ClientTlsParameters::new(config.smtp_host.to_owned(), tls_builder.build().unwrap())
)
} else {
ClientSecurity::None
};
let smtp_client = SmtpClient::new(
(config.smtp_host.to_owned().as_str(), config.smtp_port),
client_security
).unwrap();
let smtp_client = match (&config.smtp_username, &config.smtp_password) {
(Some(username), Some(password)) => {
smtp_client.credentials(Credentials::new(username.to_owned(), password.to_owned()))
},
(_, _) => smtp_client,
};
smtp_client
.smtp_utf8(true)
.connection_reuse(ConnectionReuseParameters::NoReuse)
.transport()
}
pub fn send_password_hint(address: &str, hint: Option<String>, config: &MailConfig) -> Result<(), String> {
let (subject, body) = if let Some(hint) = hint {
("Your master password hint",
format!(
"You (or someone) recently requested your master password hint.\n\n\
Your hint is: \"{}\"\n\n\
If you did not request your master password hint you can safely ignore this email.\n",
hint))
} else {
("Sorry, you have no password hint...",
"Sorry, you have not specified any password hint...\n".to_string())
};
let email = EmailBuilder::new()
.to(address)
.from((config.smtp_from.to_owned(), "Bitwarden-rs"))
.subject(subject)
.body(body)
.build().unwrap();
match mailer(config).send(email.into()) {
Ok(_) => Ok(()),
Err(e) => Err(e.description().to_string()),
}
}
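
The mail itself is plain string formatting; a self-contained sketch of just the subject/body selection (as above, without the lettre plumbing):

fn hint_mail(hint: Option<&str>) -> (&'static str, String) {
    match hint {
        Some(hint) => (
            "Your master password hint",
            format!(
                "You (or someone) recently requested your master password hint.\n\n\
                 Your hint is: \"{}\"\n\n\
                 If you did not request your master password hint you can safely ignore this email.\n",
                hint
            ),
        ),
        None => (
            "Sorry, you have no password hint...",
            "Sorry, you have not specified any password hint...\n".to_string(),
        ),
    }
}

fn main() {
    let (subject, body) = hint_mail(Some("the usual one"));
    println!("Subject: {}\n\n{}", subject, body);
}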

View File

@@ -1,9 +1,13 @@
#![feature(plugin, custom_derive)]
#![feature(plugin, custom_derive, vec_remove_item, try_trait)]
#![plugin(rocket_codegen)]
#![allow(proc_macro_derive_resolution_fallback)] // TODO: Remove this when diesel update fixes warnings
extern crate rocket;
extern crate rocket_contrib;
extern crate reqwest;
extern crate multipart;
extern crate ws;
extern crate rmpv;
extern crate chashmap;
extern crate serde;
#[macro_use]
extern crate serde_derive;
@@ -19,11 +23,20 @@ extern crate chrono;
extern crate oath;
extern crate data_encoding;
extern crate jsonwebtoken as jwt;
extern crate u2f;
extern crate dotenv;
#[macro_use]
extern crate lazy_static;
#[macro_use]
extern crate num_derive;
extern crate num_traits;
extern crate lettre;
extern crate lettre_email;
extern crate native_tls;
extern crate fast_chemail;
extern crate byteorder;
use std::{io, env, path::Path, process::{exit, Command}};
use std::{path::Path, process::{exit, Command}};
use rocket::Rocket;
#[macro_use]
@@ -33,6 +46,7 @@ mod api;
mod db;
mod crypto;
mod auth;
mod mail;
fn init_rocket() -> Rocket {
rocket::ignite()
@@ -40,22 +54,32 @@ fn init_rocket() -> Rocket {
.mount("/api", api::core_routes())
.mount("/identity", api::identity_routes())
.mount("/icons", api::icons_routes())
.mount("/notifications", api::notifications_routes())
.manage(db::init_pool())
.manage(api::start_notification_server())
}
// Embed the migrations from the migrations folder into the application
// This way, the program automatically migrates the database to the latest version
// https://docs.rs/diesel_migrations/*/diesel_migrations/macro.embed_migrations.html
embed_migrations!();
#[allow(unused_imports)]
mod migrations {
embed_migrations!();
pub fn run_migrations() {
// Make sure the database is up to date (create if it doesn't exist, or run the migrations)
let connection = ::db::get_connection().expect("Can't connect to DB");
use std::io::stdout;
embedded_migrations::run_with_output(&connection, &mut stdout()).expect("Can't run migrations");
}
}
fn main() {
check_db();
check_rsa_keys();
check_web_vault();
// Make sure the database is up to date (create if it doesn't exist, or run the migrations)
let connection = db::get_connection().expect("Can't connect to DB");
embedded_migrations::run_with_output(&connection, &mut io::stdout()).expect("Can't run migrations");
check_web_vault();
migrations::run_migrations();
init_rocket().launch();
}
@@ -70,6 +94,11 @@ fn check_db() {
exit(1);
}
}
// Turn on WAL in SQLite
use diesel::RunQueryDsl;
let connection = db::get_connection().expect("Can't connect to DB");
diesel::sql_query("PRAGMA journal_mode=wal").execute(&connection).expect("Failed to turn on WAL");
}
fn check_rsa_keys() {
@@ -118,6 +147,10 @@ fn check_rsa_keys() {
}
fn check_web_vault() {
if !CONFIG.web_vault_enabled {
return;
}
let index_path = Path::new(&CONFIG.web_vault_folder).join("index.html");
if !index_path.exists() {
@@ -131,6 +164,61 @@ lazy_static! {
static ref CONFIG: Config = Config::load();
}
#[derive(Debug)]
pub struct MailConfig {
smtp_host: String,
smtp_port: u16,
smtp_ssl: bool,
smtp_from: String,
smtp_username: Option<String>,
smtp_password: Option<String>,
}
impl MailConfig {
fn load() -> Option<Self> {
use util::{get_env, get_env_or};
// When SMTP_HOST is absent, we assume the user does not want to enable it.
let smtp_host = match get_env("SMTP_HOST") {
Some(host) => host,
None => return None,
};
let smtp_from = get_env("SMTP_FROM").unwrap_or_else(|| {
println!("Please specify SMTP_FROM to enable SMTP support.");
exit(1);
});
let smtp_ssl = get_env_or("SMTP_SSL", true);
let smtp_port = get_env("SMTP_PORT").unwrap_or_else(||
if smtp_ssl {
587u16
} else {
25u16
}
);
let smtp_username = get_env("SMTP_USERNAME");
let smtp_password = get_env("SMTP_PASSWORD").or_else(|| {
if smtp_username.as_ref().is_some() {
println!("SMTP_PASSWORD is mandatory when specifying SMTP_USERNAME.");
exit(1);
} else {
None
}
});
Some(MailConfig {
smtp_host,
smtp_port,
smtp_ssl,
smtp_from,
smtp_username,
smtp_password,
})
}
}
#[derive(Debug)]
pub struct Config {
database_url: String,
@@ -142,31 +230,56 @@ pub struct Config {
public_rsa_key: String,
web_vault_folder: String,
web_vault_enabled: bool,
websocket_port: i32,
local_icon_extractor: bool,
signups_allowed: bool,
invitations_allowed: bool,
password_iterations: i32,
show_password_hint: bool,
domain: String,
domain_set: bool,
mail: Option<MailConfig>,
}
impl Config {
fn load() -> Self {
use util::{get_env, get_env_or};
dotenv::dotenv().ok();
let df = env::var("DATA_FOLDER").unwrap_or("data".into());
let key = env::var("RSA_KEY_NAME").unwrap_or("rsa_key".into());
let df = get_env_or("DATA_FOLDER", "data".to_string());
let key = get_env_or("RSA_KEY_FILENAME", format!("{}/{}", &df, "rsa_key"));
let domain = get_env("DOMAIN");
Config {
database_url: env::var("DATABASE_URL").unwrap_or(format!("{}/{}", &df, "db.sqlite3")),
icon_cache_folder: env::var("ICON_CACHE_FOLDER").unwrap_or(format!("{}/{}", &df, "icon_cache")),
attachments_folder: env::var("ATTACHMENTS_FOLDER").unwrap_or(format!("{}/{}", &df, "attachments")),
database_url: get_env_or("DATABASE_URL", format!("{}/{}", &df, "db.sqlite3")),
icon_cache_folder: get_env_or("ICON_CACHE_FOLDER", format!("{}/{}", &df, "icon_cache")),
attachments_folder: get_env_or("ATTACHMENTS_FOLDER", format!("{}/{}", &df, "attachments")),
private_rsa_key: format!("{}/{}.der", &df, &key),
private_rsa_key_pem: format!("{}/{}.pem", &df, &key),
public_rsa_key: format!("{}/{}.pub.der", &df, &key),
private_rsa_key: format!("{}.der", &key),
private_rsa_key_pem: format!("{}.pem", &key),
public_rsa_key: format!("{}.pub.der", &key),
web_vault_folder: env::var("WEB_VAULT_FOLDER").unwrap_or("web-vault/".into()),
web_vault_folder: get_env_or("WEB_VAULT_FOLDER", "web-vault/".into()),
web_vault_enabled: get_env_or("WEB_VAULT_ENABLED", true),
signups_allowed: util::parse_option_string(env::var("SIGNUPS_ALLOWED").ok()).unwrap_or(false),
password_iterations: util::parse_option_string(env::var("PASSWORD_ITERATIONS").ok()).unwrap_or(100_000),
websocket_port: get_env_or("WEBSOCKET_PORT", 3012),
local_icon_extractor: get_env_or("LOCAL_ICON_EXTRACTOR", false),
signups_allowed: get_env_or("SIGNUPS_ALLOWED", true),
invitations_allowed: get_env_or("INVITATIONS_ALLOWED", true),
password_iterations: get_env_or("PASSWORD_ITERATIONS", 100_000),
show_password_hint: get_env_or("SHOW_PASSWORD_HINT", true),
domain_set: domain.is_some(),
domain: domain.unwrap_or("http://localhost".into()),
mail: MailConfig::load(),
}
}
}
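
get_env_or (defined in util.rs below) parses any FromStr type out of the environment with a typed default, which is what lets ports, booleans, and paths share one code path here. A simplified sketch of the same behaviour, without the nightly Try-trait plumbing the real helper uses:

use std::env;
use std::str::FromStr;

fn get_env_or<V: FromStr>(key: &str, default: V) -> V {
    env::var(key).ok().and_then(|s| s.parse().ok()).unwrap_or(default)
}

fn main() {
    env::set_var("WEBSOCKET_PORT", "3012");
    env::set_var("SIGNUPS_ALLOWED", "false");
    let port: i32 = get_env_or("WEBSOCKET_PORT", 3012);
    let signups: bool = get_env_or("SIGNUPS_ALLOWED", true);
    assert_eq!((port, signups), (3012, false));
}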

View File

@@ -3,24 +3,25 @@
///
#[macro_export]
macro_rules! err {
($err:expr, $err_desc:expr, $msg:expr) => {
($err:expr, $msg:expr) => {{
println!("ERROR: {}", $msg);
err_json!(json!({
"error": $err,
"error_description": $err_desc,
"ErrorModel": {
"Message": $msg,
"ValidationErrors": null,
"Object": "error"
}
"Message": $err,
"ValidationErrors": {
"": [$msg,],
},
"ExceptionMessage": null,
"ExceptionStackTrace": null,
"InnerExceptionMessage": null,
"Object": "error",
}))
};
($msg:expr) => { err!("default_error", "default_error_description", $msg) }
}};
($msg:expr) => { err!("The model state is invalid", $msg) }
}
#[macro_export]
macro_rules! err_json {
($expr:expr) => {{
println!("ERROR: {}", $expr);
return Err($crate::rocket::response::status::BadRequest(Some($crate::rocket_contrib::Json($expr))));
}}
}
@@ -70,7 +71,7 @@ pub fn delete_file(path: &str) -> bool {
}
const UNITS: [&'static str; 6] = ["bytes", "KB", "MB", "GB", "TB", "PB"];
const UNITS: [&str; 6] = ["bytes", "KB", "MB", "GB", "TB", "PB"];
pub fn get_display_size(size: i32) -> String {
let mut size = size as f64;
@@ -96,6 +97,7 @@ pub fn get_display_size(size: i32) -> String {
///
use std::str::FromStr;
use std::ops::Try;
pub fn upcase_first(s: &str) -> String {
let mut c = s.chars();
@@ -105,21 +107,44 @@ pub fn upcase_first(s: &str) -> String {
}
}
pub fn parse_option_string<S, T>(string: Option<S>) -> Option<T> where S: AsRef<str>, T: FromStr {
if let Some(Ok(value)) = string.map(|s| s.as_ref().parse::<T>()) {
pub fn try_parse_string<S, T, U>(string: impl Try<Ok = S, Error=U>) -> Option<T> where S: AsRef<str>, T: FromStr {
if let Ok(Ok(value)) = string.into_result().map(|s| s.as_ref().parse::<T>()) {
Some(value)
} else {
None
}
}
pub fn try_parse_string_or<S, T, U>(string: impl Try<Ok = S, Error=U>, default: T) -> T where S: AsRef<str>, T: FromStr {
if let Ok(Ok(value)) = string.into_result().map(|s| s.as_ref().parse::<T>()) {
value
} else {
default
}
}
///
/// Env methods
///
use std::env;
pub fn get_env<V>(key: &str) -> Option<V> where V: FromStr {
try_parse_string(env::var(key))
}
pub fn get_env_or<V>(key: &str, default: V) -> V where V: FromStr {
try_parse_string_or(env::var(key), default)
}
///
/// Date util methods
///
use chrono::NaiveDateTime;
const DATETIME_FORMAT: &'static str = "%Y-%m-%dT%H:%M:%S%.6fZ";
const DATETIME_FORMAT: &str = "%Y-%m-%dT%H:%M:%S%.6fZ";
pub fn format_date(date: &NaiveDateTime) -> String {
date.format(DATETIME_FORMAT).to_string()
@@ -129,20 +154,12 @@ pub fn format_date(date: &NaiveDateTime) -> String {
/// Deserialization methods
///
use std::collections::BTreeMap as Map;
use std::fmt;
use serde::de::{self, Deserialize, DeserializeOwned, Deserializer};
use serde_json::Value;
use serde::de::{self, DeserializeOwned, Deserializer, MapAccess, SeqAccess, Visitor};
use serde_json::{self, Value};
/// https://github.com/serde-rs/serde/issues/586
pub fn upcase_deserialize<'de, T, D>(deserializer: D) -> Result<T, D::Error>
where T: DeserializeOwned,
D: Deserializer<'de>
{
let map = Map::<String, Value>::deserialize(deserializer)?;
let lower = map.into_iter().map(|(k, v)| (upcase_first(&k), v)).collect();
T::deserialize(Value::Object(lower)).map_err(de::Error::custom)
}
pub type JsonMap = serde_json::Map<String, Value>;
#[derive(PartialEq, Serialize, Deserialize)]
pub struct UpCase<T: DeserializeOwned> {
@@ -150,3 +167,76 @@ pub struct UpCase<T: DeserializeOwned> {
#[serde(flatten)]
pub data: T,
}
/// https://github.com/serde-rs/serde/issues/586
pub fn upcase_deserialize<'de, T, D>(deserializer: D) -> Result<T, D::Error>
where T: DeserializeOwned,
D: Deserializer<'de>
{
let d = deserializer.deserialize_any(UpCaseVisitor)?;
T::deserialize(d).map_err(de::Error::custom)
}
struct UpCaseVisitor;
impl<'de> Visitor<'de> for UpCaseVisitor {
type Value = Value;
fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
formatter.write_str("an object or an array")
}
fn visit_map<A>(self, mut map: A) -> Result<Self::Value, A::Error>
where A: MapAccess<'de>
{
let mut result_map = JsonMap::new();
while let Some((key, value)) = map.next_entry()? {
result_map.insert(upcase_first(key), upcase_value(&value));
}
Ok(Value::Object(result_map))
}
fn visit_seq<A>(self, mut seq: A) -> Result<Self::Value, A::Error>
where A: SeqAccess<'de> {
let mut result_seq = Vec::<Value>::new();
while let Some(value) = seq.next_element()? {
result_seq.push(upcase_value(&value));
}
Ok(Value::Array(result_seq))
}
}
fn upcase_value(value: &Value) -> Value {
if let Some(map) = value.as_object() {
let mut new_value = json!({});
for (key, val) in map {
let processed_key = _process_key(key);
new_value[processed_key] = upcase_value(val);
}
new_value
} else if let Some(array) = value.as_array() {
// Initialize array with null values
let mut new_value = json!(vec![Value::Null; array.len()]);
for (index, val) in array.iter().enumerate() {
new_value[index] = upcase_value(val);
}
new_value
} else {
value.clone()
}
}
fn _process_key(key: &str) -> String {
match key.to_lowercase().as_ref() {
"ssn" => "SSN".into(),
_ => self::upcase_first(key)
}
}
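
The visitor above lets the API accept both camelCase payloads (from newer clients) and PascalCase ones by upper-casing the first letter of every key recursively, with "ssn" special-cased to "SSN". A sketch of the pure key transformation on one object level (upcase_first is reconstructed from the helper above; the real upcase_value also recurses into nested values):

use serde_json::{json, Map, Value};

fn upcase_first(s: &str) -> String {
    let mut c = s.chars();
    match c.next() {
        Some(f) => f.to_uppercase().collect::<String>() + c.as_str(),
        None => String::new(),
    }
}

fn process_key(key: &str) -> String {
    match key.to_lowercase().as_ref() {
        "ssn" => "SSN".into(),
        _ => upcase_first(key),
    }
}

fn main() {
    let input = json!({ "name": "item", "ssn": "123" });
    let upcased: Value = input.as_object().unwrap().iter()
        .map(|(k, v)| (process_key(k), v.clone()))
        .collect::<Map<String, Value>>()
        .into();
    assert_eq!(upcased["Name"], "item");
    assert_eq!(upcased["SSN"], "123");
}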