Compare commits

214 Commits

Author SHA1 Message Date
Daniel García
b4b62c22a4 Merge pull request #648 from BlackDex/icon-security
Added missing .env configuration option.
2019-10-08 18:08:32 +02:00
BlackDex
05569147af Added missing .env configuration option. 2019-10-08 13:30:17 +02:00
Daniel García
99a635d327 Merge pull request #643 from BlackDex/icon-security
Updated icon blacklisting.
2019-10-05 17:06:14 +02:00
Daniel García
e6b763026e Merge branch 'master' into icon-security 2019-10-05 16:45:36 +02:00
Daniel García
c182583e09 Merge pull request #644 from BlackDex/issue-565
Fixed issue #565
2019-10-05 16:17:43 +02:00
Daniel García
d821389c2e Merge pull request #639 from vverst/cors-update
Change CORS headers
2019-10-05 16:09:33 +02:00
BlackDex
be2916333b Fixed issue #565
Issue fixed by omitting the cookie header when cookie_str is empty
2019-10-05 15:45:09 +02:00
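
A minimal sketch of that fix, with assumed names rather than the actual patch: build the request and attach the Cookie header only when there is something to send.

```rust
// Hypothetical sketch: never emit an empty `Cookie:` header.
fn get_with_cookies(
    client: &reqwest::Client,
    url: &str,
    cookie_str: &str,
) -> reqwest::RequestBuilder {
    let mut request = client.get(url);
    if !cookie_str.is_empty() {
        // Only attach the header when we actually have cookies to send.
        request = request.header("Cookie", cookie_str);
    }
    request
}
```
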
BlackDex
9124d8a3fb Updated icon blacklisting.
- Blacklisting was not effective for redirects and rel href
- Able to blacklist non-global IPs (RFC 1918, multicast, etc.)
2019-10-05 14:48:15 +02:00
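
The non-global check could look roughly like the sketch below. This is an assumption, not the project's code; the stable standard library has no `is_global()`, so the reserved ranges are tested explicitly.

```rust
use std::net::IpAddr;

// Hypothetical sketch: treat RFC 1918, loopback, link-local, multicast,
// broadcast and unspecified addresses as non-global, so the icon service
// never fetches from hosts inside the local network.
fn is_global(ip: IpAddr) -> bool {
    match ip {
        IpAddr::V4(v4) => !(v4.is_private()
            || v4.is_loopback()
            || v4.is_link_local()
            || v4.is_multicast()
            || v4.is_broadcast()
            || v4.is_unspecified()),
        IpAddr::V6(v6) => !(v6.is_loopback() || v6.is_multicast() || v6.is_unspecified()),
    }
}
```
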
vpl
7b1da527a6 Change CORS headers
Only add Allow-Origin to all requests and move the others to preflight OPTIONS request.
If Origin is `file://` change it to the wildcard.
2019-10-01 20:12:33 +02:00
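
A sketch of that header split; the function shape and header values are illustrative assumptions.

```rust
// Hypothetical sketch: Access-Control-Allow-Origin goes on every
// response; the remaining CORS headers are only added to the
// preflight OPTIONS response.
fn cors_headers(method: &str, origin: &str) -> Vec<(&'static str, String)> {
    // Desktop/mobile webviews send a `file://` origin; map it to the wildcard.
    let allow_origin = if origin == "file://" { "*" } else { origin };

    let mut headers = vec![("Access-Control-Allow-Origin", allow_origin.to_string())];
    if method == "OPTIONS" {
        headers.push(("Access-Control-Allow-Methods", "GET, POST, PUT, DELETE, OPTIONS".to_string()));
        headers.push(("Access-Control-Allow-Headers", "Content-Type, Authorization".to_string()));
    }
    headers
}
```
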
Daniel García
e7b8602e1f Merge pull request #638 from mprasil/add_sqlite_binary
Add sqlite binary into the docker images
2019-10-01 19:50:41 +02:00
Miro Prasil
d6e9af909b Remove the unnecessary check for sqlite
The binary we use is called `sqlite3` so no need to check for other
name variants as we won't use those anyways.
2019-10-01 10:40:22 +01:00
Miro Prasil
acdd42935b Add sqlite binary into the docker images
This is done to enable backup functionality in the admin interface while
we're waiting for the libsqlite-sys 0.17 to bubble up in the upstream
dependencies. Then we can start using `VACUUM INTO`

This also extends the check for the sqlite binary to also try `sqlite3`
as this is the name of the binary in baseimage distributions we use.
2019-09-30 13:54:06 +01:00
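
A sketch of how a backup can drive that binary; using the `.backup` dot-command is an assumption about the implementation, not the verbatim admin-panel code.

```rust
use std::process::Command;

// Hypothetical sketch: copy the live database via the bundled `sqlite3`
// binary, which snapshots safely even while the DB is in use.
fn backup_sqlite(db_path: &str, backup_path: &str) -> std::io::Result<bool> {
    let status = Command::new("sqlite3")
        .arg(db_path)
        .arg(format!(".backup '{}'", backup_path))
        .status()?;
    Ok(status.success())
}
```
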
Daniel García
8367d1d715 Merge pull request #631 from vverst/cors-put
Use Access-Control-Allow-Method
2019-09-23 20:03:51 +02:00
vpl
56f12dc982 Use Access-Control-Allow-Method 2019-09-23 07:44:44 +02:00
Daniel García
4c07f05b3a Remove Result<T, E: Debug> in preparation of deprecation as Rocket responder.
Removed unnecessary returns
2019-09-17 21:05:56 +02:00
Daniel García
b73ff886c3 Use upstream rmp 2019-09-17 19:47:51 +02:00
Daniel García
2e7bd62353 Merge pull request #624 from swedishborgie/postgresql
Fix issue with downloading attachments and PostgreSQL backend.
2019-09-17 18:50:40 +02:00
Michael Powers
1264eb640a Added a migration that fixes #1 which caused attachments to be broken
for the PostgreSQL backend. Also converts any CHAR types to VARCHAR to prevent the same issue from causing problems down the line.
2019-09-16 19:52:00 -04:00
Daniel García
3a90364b32 Merge pull request #621 from swedishborgie/postgresql
Adds support for PostgreSQL which adds #87 and is mentioned in #246.
2019-09-16 20:05:05 +02:00
Michael Powers
f5f9861a78 Adds support for PostgreSQL which resolves #87 and is mentioned in #246.
This includes migrations as well as Dockerfile's for amd64.

The biggest change is that replace_into isn't supported by Diesel for the
PostgreSQL backend, instead requiring the use of on_conflict. This
unfortunately requires a branch for save() on all of the models currently
using replace_into.
2019-09-12 16:12:22 -04:00
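
The branch described above looks roughly like this in Diesel 1.4; the table and struct below are illustrative stand-ins, not the project's schema.

```rust
#[macro_use]
extern crate diesel;

use diesel::prelude::*;

table! {
    users (uuid) {
        uuid -> Text,
        email -> Text,
    }
}

#[derive(Insertable, AsChangeset)]
#[table_name = "users"]
struct UserDb<'a> {
    uuid: &'a str,
    email: &'a str,
}

// MySQL/SQLite backends: `replace_into` performs the upsert directly.
#[cfg(not(feature = "postgresql"))]
fn save(user: &UserDb, conn: &SqliteConnection) -> QueryResult<usize> {
    diesel::replace_into(users::table).values(user).execute(conn)
}

// PostgreSQL backend: Diesel doesn't support `replace_into` here, so the
// same upsert is spelled with `on_conflict(...).do_update()`.
#[cfg(feature = "postgresql")]
fn save(user: &UserDb, conn: &PgConnection) -> QueryResult<usize> {
    diesel::insert_into(users::table)
        .values(user)
        .on_conflict(users::uuid)
        .do_update()
        .set(user)
        .execute(conn)
}
```
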
Daniel García
f9408a00c6 Allow self signed certs and increase a bit the timings 2019-09-11 22:01:42 +02:00
Daniel García
ae8bf954c1 Updated web vault to 2.12 2019-09-07 22:13:56 +02:00
Daniel García
c656f2f694 Merge pull request #604 from mprasil/fix-healthcheck
Fix #603 and remove mysql from sqlite image
2019-09-06 10:47:10 +02:00
Miro Prasil
eea3f13bb3 Fix #603 and remove mysql from sqlite image
This changes the healthcheck to use `sh` instead of bash, which is absent
from some image versions (like alpine).

It also removes `*mariadb*` packages from runtime image of sqlite images
as these shouldn't be required.
2019-09-06 09:34:21 +01:00
Daniel García
df8114f8be Updated client kdf iterations to 100000 and fixed some lints 2019-09-05 21:56:12 +02:00
Daniel García
dda244edd8 Merge pull request #589 from H3npi/H3npi-patch-1
Adds Healthcheck for default docker container
2019-09-05 19:47:10 +02:00
H3npi
cce3ce816c Adds environment port to curl healthcheck 2019-09-04 09:12:53 +02:00
Daniel García
65c0d1064b Merge pull request #599 from vverst/cors
Add Cors headers
2019-09-03 20:22:54 +02:00
vpl
5a2f968d7a Set correct response headers, status code 2019-09-02 21:13:12 +02:00
vpl
16d88402cb Initial version of CORS support 2019-09-01 13:00:12 +02:00
Daniel García
7dcf18151d Fix onsubmit 2019-08-31 17:57:47 +02:00
Daniel García
e3404dd322 Use the local scripts instead of cloudflare, remove jquery and update config so disabling a master toggle doesn't remove the values 2019-08-31 17:47:52 +02:00
Daniel García
bfc517ee80 Remove unused warning 2019-08-31 17:26:16 +02:00
Daniel García
4a7d2a1e28 Rename static files endpoint 2019-08-31 17:25:31 +02:00
H3npi
66a68f6d22 Adds Healthcheck for all docker container 2019-08-29 09:02:02 +02:00
Daniel García
469318bcbd Updated dependencies and web vault version 2019-08-27 21:14:15 +02:00
Daniel García
c07c9995ea Merge pull request #555 from vverst/email-codes
Add Email 2FA login
2019-08-27 21:07:41 +02:00
Daniel García
2c2276c5bb Merge pull request #585 from ViViDboarder/mail-auth-over-insecure
Allow explicitly defined smtp auth mechanism
2019-08-27 20:21:23 +02:00
ViViDboarder
672a245548 Remove unnecessary clone 2019-08-27 10:40:38 -07:00
vpl
5d50b1ee3c Merge remote-tracking branch 'upstream/master' into email-codes 2019-08-26 21:38:45 +02:00
vpl
c99df1c310 Compare token using crypto::ct_eq 2019-08-26 20:26:59 +02:00
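
ring's constant-time primitive is enough for this; a minimal sketch, assuming the token is compared as raw bytes.

```rust
use ring::constant_time::verify_slices_are_equal;

// Hypothetical sketch: compare a submitted 2FA token against the stored
// one without short-circuiting on the first mismatched byte, so response
// timing doesn't leak how much of the token was correct.
fn ct_eq(a: &str, b: &str) -> bool {
    verify_slices_are_equal(a.as_bytes(), b.as_bytes()).is_ok()
}
```
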
vpl
591ae10144 Get token from single u64 2019-08-26 20:26:54 +02:00
ViViDboarder
2d2745195e Allow explicitly defined smtp auth mechanism 2019-08-23 16:22:14 -07:00
Daniel García
026f9da035 Allow removing users two factors 2019-08-21 17:13:06 +02:00
Daniel García
d23d4f2c1d Allow editing HIBP key in the admin panel 2019-08-20 23:53:00 +02:00
Daniel García
515b87755a Update HIBP to v3, requires paid API key, fixes #583 2019-08-20 20:07:12 +02:00
Daniel García
d8ea3d2bfe Merge pull request #582 from vverst/require-device-email-config
Add config option to require new device emails
2019-08-19 22:58:50 +02:00
vpl
ee7837d022 Add option to require new device emails 2019-08-19 22:14:00 +02:00
Daniel García
07743e490b Ignore error sending device email 2019-08-18 19:32:26 +02:00
Daniel García
9101d6e48f Update dependencies 2019-08-18 19:31:54 +02:00
Daniel García
27c23b60b8 Merge pull request #571 from BlackDex/icon-proxy-support
Added reqwest proxy support
2019-08-15 22:14:10 +02:00
BlackDex
e7b6238f43 Added reqwest proxy support 2019-08-12 17:24:32 +02:00
vpl
ad2225b6e5 Add configuration options for Email 2FA 2019-08-10 22:39:04 +02:00
vpl
5609103a97 Use ring to generate email token 2019-08-06 22:38:08 +02:00
vpl
6d460b44b0 Use saved token for email 2fa codes 2019-08-04 17:21:57 +02:00
vpl
efd8d9f528 Remove some unused imports, unneeded mut variables 2019-08-04 16:56:41 +02:00
vpl
29aedd388e Add email code logic and move two_factor into separate modules 2019-08-04 16:56:41 +02:00
vpl
27e0e41835 Add email authenticator logic 2019-08-04 16:56:39 +02:00
vpl
0b60f20eb3 Add email message for twofactor email codes 2019-08-03 18:49:34 +02:00
Daniel García
8be2ed6255 Update web vault to 2.11.0 2019-07-30 19:50:35 +02:00
Daniel García
c9c3f07171 Updated dependencies and fixed panic getting icons 2019-07-30 19:42:05 +02:00
Daniel García
8a21c6df10 Merge pull request #541 from vverst/mail-new-device
Add "New Device Logged In From" email
2019-07-30 13:18:42 -04:00
vpl
df71f57d86 Move send device email to end of password login
Send new device email after two factor authentication.
2019-07-25 21:10:27 +02:00
vpl
60e39a9dd1 Move retrieve/new device from connData to separate function 2019-07-22 12:30:26 +02:00
vpl
bc6a53b847 Add new device email when user logs in 2019-07-22 08:26:24 +02:00
Daniel García
05a1137828 Move backend checks to build.rs to fail fast, and updated dependencies 2019-07-09 17:26:34 +02:00
Daniel García
cef38bf40b Merge pull request #525 from fbartels/hadolint
use hadolint for linting Dockerfiles
2019-07-09 17:22:27 +02:00
Felix Bartels
0b13a8c4aa last round of linting fixes
Signed-off-by: Felix Bartels <felix@host-consultants.de>
2019-07-06 08:36:18 +02:00
Felix Bartels
3fbd7919d8 more linting fixes
Signed-off-by: Felix Bartels <felix@host-consultants.de>
2019-07-06 08:16:05 +02:00
Felix Bartels
5f688ff209 no more linting errors for the main Dockerfile
Signed-off-by: Felix Bartels <felix@host-consultants.de>
2019-07-05 22:45:29 +02:00
Felix Bartels
f6cfb5bf21 add hadolint config file
to globally ignore certain rules
2019-07-05 11:06:44 +02:00
Felix Bartels
df8c9f39ac add hadolint to travisfile
Signed-off-by: Felix Bartels <felix@host-consultants.de>
2019-07-04 15:59:50 +02:00
Daniel García
d7ee7caed4 Merge pull request #520 from njfox/fix-email-alias
Fix #468 - Percent-encode the email address in invite link
2019-07-03 22:42:42 +02:00
Nick Fox
2e300da057 Fix #468 - Percent-encode the email address in invite link 2019-07-02 22:55:13 -04:00
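
The fix leans on the `percent-encoding` crate (added to Cargo.toml in this compare); a sketch with an assumed link format.

```rust
use percent_encoding::{utf8_percent_encode, NON_ALPHANUMERIC};

// Hypothetical sketch: encode the address before embedding it in the
// invite URL, so an alias like `user+bw@example.com` survives the round
// trip instead of the `+` being decoded as a space.
fn invite_link(base_url: &str, org_id: &str, email: &str) -> String {
    let encoded = utf8_percent_encode(email, NON_ALPHANUMERIC);
    // Illustrative route, not necessarily the exact vault URL.
    format!("{}/#/accept-organization/?organizationId={}&email={}", base_url, org_id, encoded)
}
```
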
Daniel García
3fb63bbe8c Merge pull request #514 from mprasil/dockerfile_cleanup
Dockerfile cleanup
2019-06-26 17:20:10 +02:00
Miro Prasil
9671ed4cca Symlink amd64 Dockerfile to repo root 2019-06-24 09:59:43 +01:00
Miro Prasil
d10ef3fd4b Create Dockerfiles for mysql builds 2019-06-24 09:56:26 +01:00
Miro Prasil
dd0b847912 Move current dockerfiles to their arch folders 2019-06-24 09:52:55 +01:00
Daniel García
8c34ff5d23 Merge pull request #511 from CubityFirst/patch-1
Corrected Spelling
2019-06-18 18:28:00 +02:00
Daniel García
15750256e2 Merge pull request #510 from mprasil/armv6_fix
Making a symlink is no longer necessary
2019-06-18 18:27:47 +02:00
Cubity_First
6989fc7bdb Corrected Spelling
Changed it from Chache to Cache on Line 207
2019-06-18 15:45:19 +01:00
Miro Prasil
4923614730 Making a symlink is no longer necessary 2019-06-17 12:16:26 +01:00
Daniel García
76f38621de Update dependencies and remove unwraps from Cipher::to_json 2019-06-14 22:51:50 +02:00
Daniel García
fff72889f6 Document DB URL in .env file 2019-06-02 13:44:59 +02:00
Daniel García
12af32b9ea Don't print DB URL 2019-06-02 13:39:16 +02:00
Daniel García
9add8e19eb Update dependencies and remove travis unused feature 2019-06-02 00:28:20 +02:00
Daniel García
5710703c50 Make sure the backup option only appears when using sqlite 2019-06-02 00:08:52 +02:00
Daniel García
1322b876e9 Merge pull request #493 from endyman/feature/initial_mysql_support
Initial support for mysql
2019-06-01 23:33:06 +02:00
Daniel García
9ed2ba61c6 Merge pull request #475 from TheMardy/master
Create Backup functionality
2019-06-01 23:29:58 +02:00
Nils Domrose
62a461ae15 remove syslog from ci, make features flag more clear 2019-05-30 22:19:58 +02:00
Nils Domrose
6f7220b68e adapt other Dockerfiles 2019-05-28 11:56:49 +02:00
Nils Domrose
4859932d35 fixed typo 2019-05-28 07:48:17 +02:00
Nils Domrose
ee277de707 include libsqlite3-sys optionally, removed non common features 2019-05-27 23:31:56 +02:00
Nils Domrose
c11f47903a revert include libsqlite3-sys optionally 2019-05-27 23:18:45 +02:00
Nils Domrose
6a5f1613e7 include libsqlite3-sys optionally 2019-05-27 23:07:47 +02:00
Nils Domrose
dc36f0cb6c re-added sqlite check_db code, cleanup 2019-05-27 22:58:52 +02:00
Nils Domrose
6c38026ef5 use char(36) for uuid columns 2019-05-27 17:20:20 +02:00
Nils Domrose
4c9cc9890c adapt travis to not enable conflicting features 2019-05-27 00:41:42 +02:00
Nils Domrose
f57b407c60 fix cargo syntax 2019-05-27 00:29:31 +02:00
Nils Domrose
ce0651b79c fix mysql package in ubuntu 2019-05-27 00:23:42 +02:00
Nils Domrose
edc26cb1e1 adapt pipeline to not enable conflicting features 2019-05-27 00:19:59 +02:00
Nils Domrose
ff759397f6 initial mysql support 2019-05-26 23:03:05 +02:00
Emil Madsen
badd22ac3d Make docker image build 2019-05-20 22:36:27 +02:00
Emil Madsen
6f78395ef7 Passwordless sudo on azure? 2019-05-20 21:59:18 +02:00
Emil Madsen
5fb6531db8 Attempt to fix azure pipeline 2019-05-20 21:54:01 +02:00
Emil Madsen
eb9d5e1196 Reintroduce .env.template 2019-05-20 21:34:20 +02:00
Emil Madsen
233b48bdad Fix missing joinable in schema 2019-05-20 21:30:31 +02:00
Emil Madsen
e22e290f67 Fix key and type variable names for mysql 2019-05-20 21:24:29 +02:00
Emil Madsen
ab95a69dc8 Rework migrations for MySQL 2019-05-20 21:12:41 +02:00
Emil Madsen
85c8a01f4a Merge branch 'master' of github.com:Skeen/bitwarden_rs 2019-05-20 19:53:18 +02:00
Emil Madsen
42af7c6dab MySQL database 2019-05-20 19:53:14 +02:00
Daniel García
08a445e2ac Merge pull request #484 from mprasil/hub_repo_change
Point to the new docker hub image location
2019-05-17 15:44:07 +02:00
Daniel García
c0b2877da3 Update deps and swap back to official u2f crate again 2019-05-17 15:39:36 +02:00
Miro Prasil
cf8ca85289 Point to the new docker hub image location 2019-05-16 15:04:51 +01:00
Daniel García
a8a92f6c51 New vault patch release 2019-05-15 18:11:39 +02:00
Daniel García
95f833aacd Update dependencies to use new ring 2019-05-15 18:10:25 +02:00
Daniel García
4f45cc081f Update ring to 0.14, jwt to 6.0, and u2f 2019-05-11 23:18:18 +02:00
Daniel García
2a4cd24c60 Updated web vault to hide org plans again and updated dependencies 2019-05-11 22:27:51 +02:00
TheMardy
ef551f4cc6 Create Backup functionality
Added create backup functionality to the admin panel
2019-05-03 15:46:29 +02:00
Daniel García
4545f271c3 Merge pull request #473 from Starbix/patch-1
Update Runtime Base Image to Alpine v3.9
2019-05-02 22:33:43 +02:00
Cédric Laubacher
2768396a72 Update Runtime Base Image to Alpine v3.9 2019-05-02 21:28:34 +02:00
Daniel García
5521a86693 Change path for served images to avoid collision with vault images 2019-05-01 16:19:22 +02:00
Daniel García
3160780549 Merge pull request #401 from TheMardy/master
Images in Email Templates
2019-04-30 17:52:10 +02:00
TheMardy
f0701657a9 Changed to Bitwarden_RS Logo 2019-04-30 16:08:53 +02:00
Daniel García
21325b7523 Updated .env template 2019-04-27 20:14:37 +02:00
Daniel García
874f5c34bd Formatting 2019-04-26 22:08:26 +02:00
Daniel García
eadab2e9ca Updated dependencies 2019-04-26 22:07:00 +02:00
Daniel García
253faaf023 Use users duo host when required, instead of always using the global one 2019-04-15 13:07:23 +02:00
Daniel García
3d843a6a51 Merge pull request #460 from janost/organization-vault-purge
Fixed purging organization vault
2019-04-14 22:30:51 +02:00
janost
03fdf36bf9 Fixed purging organization vault 2019-04-14 22:12:48 +02:00
Daniel García
fdcc32beda Validate Duo credentials when custom 2019-04-14 22:05:05 +02:00
Daniel García
bf20355c5e Merge branch 'duo' 2019-04-14 22:02:55 +02:00
Daniel García
0136c793b4 Implement better user status API, in the future we'll probably want a way to disable users.
We should migrate from the empty password hash to a separate column then.
2019-04-13 00:01:52 +02:00
Daniel García
2e12114350 Always create the user when inviting from admin panel 2019-04-12 23:44:49 +02:00
Daniel García
f25ab42ebb Merge pull request #455 from ViViDboarder/get_users
Add new endpoint for retrieving all users
2019-04-11 20:42:44 +02:00
ViViDboarder
d3a8a278e6 Add new endpoint for retrieving all users 2019-04-11 11:24:53 -07:00
Daniel García
8d9827c55f Implement selection between global config and user settings for duo keys. 2019-04-11 18:40:03 +02:00
Daniel García
cad63f9761 Auto generate akey 2019-04-11 16:08:26 +02:00
Daniel García
bf446f44f9 Enable DATA_FOLDER to affect default CONFIG_FILE path 2019-04-11 15:41:13 +02:00
Daniel García
621f607297 Update dependencies and fix some warnings 2019-04-11 15:40:19 +02:00
Daniel García
d89bd707a8 Update vault release to show duo button 2019-04-07 18:58:32 +02:00
Daniel García
754087b990 Add global duo config and document options in .env template 2019-04-07 18:58:15 +02:00
Daniel García
cfbeb56371 Implement user duo, initial version
TODO:
- At the moment each user needs to configure a DUO application and input the API keys, we need to check if multiple users can register with the same keys correctly and if so we could implement a global setting.
- Sometimes the Duo frame doesn't load correctly, but canceling, reloading the page and logging in again seems to fix it for me.
2019-04-05 22:09:53 +02:00
Daniel García
3bb46ce496 Make the syslog crate non-optional when available 2019-04-02 22:35:22 +02:00
Daniel García
c5832f2b30 With the latest fern, syslog can be a config option instead of a build flag 2019-03-29 20:27:20 +01:00
Daniel García
d9406b0095 Update to web vault 2.10.0 2019-03-25 23:49:12 +01:00
Daniel García
2475c36a75 Implement log_level config option 2019-03-25 14:23:14 +01:00
Daniel García
c384f9c0ca Set default log level to Info, we don't use debug anyway and it just fills the logs with other crates info. 2019-03-25 14:21:50 +01:00
Daniel García
afbfebf659 Merge pull request #440 from BlackDex/mail-encoding
Fixed long e-mail messages exceeding the 1000-character line limit.
2019-03-25 13:09:51 +01:00
BlackDex
6b686c18f7 Fixed long e-mail messages exceeding the 1000-character line limit.
- Added the quoted_printable crate to encode the e-mail messages.
- Changed the way the e-mail gets built to use custom part headers.
2019-03-25 09:48:19 +01:00
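
SMTP caps a single line at roughly 1000 characters; quoted-printable encoding hard-wraps its output, keeping every encoded line short. A minimal sketch using the `quoted_printable` crate added in this compare:

```rust
// Hypothetical sketch: quoted-printable output is wrapped at 76
// characters per line, so long HTML bodies stay under the SMTP limit.
fn encode_body(html: &str) -> String {
    let encoded = quoted_printable::encode(html.as_bytes());
    String::from_utf8(encoded).expect("quoted-printable output is ASCII")
}
```
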
Daniel García
349cb33fbd Updated dependencies 2019-03-23 19:48:22 +01:00
Daniel García
d7542b6818 Merge pull request #437 from njfox/fix-smtp-error
Split up long line to stop SMTP from breaking
2019-03-21 14:22:57 +01:00
Nick Fox
7976d39d9d Adjust whitespace 2019-03-20 23:29:29 -04:00
Nick Fox
5ee9676941 Break up long line to stop SMTP from breaking 2019-03-20 23:24:30 -04:00
Daniel García
4b40cda910 Added domain blacklist regex for icons service and improved valid domain check.
Reorganized the icons code a bit.
2019-03-18 22:12:39 +01:00
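
A sketch of the domain check against the configured ICON_BLACKLIST_REGEX; the config plumbing around it is assumed.

```rust
use regex::Regex;

// Hypothetical sketch: an icon request is rejected when the domain
// matches the compiled blacklist regex, if one is configured.
fn is_domain_blacklisted(domain: &str, blacklist: &Option<Regex>) -> bool {
    blacklist.as_ref().map_or(false, |re| re.is_match(domain))
}
```
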
Daniel García
4689ed7b30 Changed uppercase deserializer to avoid a clone. 2019-03-18 22:02:37 +01:00
Daniel García
084bc2aee3 Use final release of lettre and update dependencies 2019-03-17 14:43:22 +01:00
Daniel García
6d7e15b2fd Use web vault 2.9.0 release 2019-03-14 13:29:03 +01:00
Daniel García
61515160a7 Allow changing error codes and create an empty error.
Return 404 instead of 400 when no accounts breached.
2019-03-14 00:17:36 +01:00
Daniel García
a25bfdd16d Remove unused features from multipart (integration with other servers) 2019-03-13 15:57:00 +01:00
Daniel García
e93538cea9 Add option to use wrapped TLS in email, instead of STARTTLS upgrade 2019-03-10 14:45:42 +01:00
Daniel García
b4244b28b6 Update admin page scripts and fixed broken tooltip 2019-03-09 14:41:34 +01:00
Daniel García
43f9038325 Add option to force resync clients in admin panel 2019-03-07 21:08:33 +01:00
Daniel García
27872f476e Update dependencies 2019-03-07 20:22:08 +01:00
Daniel García
339044f8aa Add warning about config panel values overriding env vars. 2019-03-07 20:22:02 +01:00
Daniel García
0718a090e1 Trim spaces from admin token during authentication and validate that the admin panel token is not empty 2019-03-07 20:21:50 +01:00
Daniel García
9e1f030a80 Explicitly close SMTP connection in case of error. 2019-03-07 20:21:10 +01:00
Daniel García
04922f6aa0 Some formatting and dependency updates 2019-03-03 16:11:55 +01:00
Daniel García
7d2bc9e162 Added option to force 2fa at logins and made some changes to two factor code.
Added newlines to config options to keep them a reasonable length.
2019-03-03 16:09:15 +01:00
Daniel García
c6c00729e3 Update vault to new version. No need to wait for a release when even the official web vault is already using it 2019-02-27 17:28:04 +01:00
Daniel García
10756b0920 Update dependencies and fix some lints 2019-02-27 17:21:04 +01:00
Daniel García
1eb1502a07 Merge pull request #416 from mprasil/armv6
Armv6
2019-02-25 18:26:53 +01:00
Miroslav Prasil
30e72a96a9 Symlink missing ld-linux file 2019-02-25 16:17:34 +00:00
Daniel García
2646db78a4 Merge pull request #414 from FrankPetrilli/patch-1
Minor typo fix conect => connect
2019-02-25 14:21:28 +01:00
Miroslav Prasil
f5358b13f5 Add Dockerfile for armv6 2019-02-25 12:17:22 +00:00
Frank Petrilli
d156170971 Minor typo fix conect => connect 2019-02-24 16:08:38 -08:00
Daniel García
d9bfe847db Merge pull request #410 from gdamjan/remove-uneeded-mutability
remove some unneeded mutability
2019-02-22 22:52:53 +01:00
Дамјан Георгиевски
473f8b8e31 remove some unneeded mutability 2019-02-22 20:25:50 +01:00
Daniel García
aeb4b4c8a5 Remove verbose, otherwise the logs get filled with useless info 2019-02-22 16:16:07 +01:00
Daniel García
980a3e45db Set up CI with Azure Pipelines 2019-02-22 15:51:30 +01:00
Daniel García
5794969f5b Merge pull request #406 from shauder/feature/disable-admin-token
Allow the Admin token to be disabled in the advanced menu
2019-02-20 23:06:52 +01:00
Shane Faulkner
8b5b06c3d1 Allow the Admin token to be disabled in the advanced menu 2019-02-20 14:56:08 -06:00
Daniel García
b50c27b619 Print a warning when an env variable is being overridden by the config file, and reorganize the main file a bit.
Modified the JWT key generation, now it should also show the output of OpenSSL in the logs.
2019-02-20 20:59:37 +01:00
Daniel García
5ee04e31e5 Updated dependencies, removed some unnecessary clones and fixed some lints 2019-02-20 17:54:18 +01:00
Daniel García
bf6ae91a6d Remove margins on small devices 2019-02-18 20:43:34 +01:00
Daniel García
828e3a5795 Add extra padding when the toolbar collapses in small devices 2019-02-18 20:33:32 +01:00
Daniel García
7b5bcd45f8 Show read-only options in the config panel and the env variable names in the tooltips 2019-02-18 19:25:33 +01:00
Daniel García
72de16fb86 Merge pull request #404 from mprasil/disable_wal
Add an option to not enable WAL (should help in #399)
2019-02-18 16:10:16 +01:00
Miroslav Prasil
0b903fc5f4 Extended the template file and refer to wiki 2019-02-18 14:57:21 +00:00
Miroslav Prasil
4df686f49e Add an option to not enable WAL (should help in #399) 2019-02-18 10:48:48 +00:00
Daniel García
d7eeaaf249 Escape user data from admin panel when calling JS 2019-02-17 15:24:14 +01:00
TheMardy
84fb6aaddb Set correct MIME type 2019-02-17 01:08:24 +01:00
Daniel García
a744b9437a Implemented multiple U2f keys, key names, and compromised checks 2019-02-16 23:07:48 +01:00
Daniel García
6027b969f5 Delete old devices when deauthorizing user sessions 2019-02-16 23:06:26 +01:00
Daniel García
93805a5d7b Fix Yubikeys deleted on error 2019-02-16 21:30:55 +01:00
Daniel García
71da961ecd Merge pull request #402 from mprasil/version_in_docker
Include git repo in build so we get version
2019-02-16 12:20:25 +01:00
Miroslav Prasil
dd421809e5 Include git repo in build so we get version 2019-02-16 08:50:16 +00:00
TheMardy
8526055bb7 Added images to email templates 2019-02-16 03:48:23 +01:00
TheMardy
a79334ea4c Added static email image routes 2019-02-16 03:44:30 +01:00
Daniel García
274ea9a4f2 Use the latest fast_chemail crate directly, with the fix 2019-02-15 14:39:30 +01:00
Daniel García
8743d18aca Update travis image and remove now-ignored sudo tag 2019-02-13 18:50:45 +01:00
Daniel García
d3773a433a Removed list of mounted routes at startup by default, with option to add it back. This would get annoying when starting the server frequently, because it printed ~130 lines of mostly useless info 2019-02-13 00:03:16 +01:00
Daniel García
0f0a87becf Add version to initial message 2019-02-12 22:47:00 +01:00
Daniel García
4b57bb8eeb Merge pull request #394 from BlackDex/icon-timeout
Added config option for icon download timeout
2019-02-12 22:00:12 +01:00
BlackDex
3b27dbb0aa Added config option for icon download timeout 2019-02-12 21:56:28 +01:00
Daniel García
ff2fbd322e Update deps and fix email check 2019-02-12 15:01:02 +01:00
Daniel García
9636f33fdb Implement constant time equal check for admin, 2fa recover and 2fa remember tokens 2019-02-11 23:45:55 +01:00
Daniel García
bbe2a1b264 Merge pull request #391 from TheMardy/master
Updated Email Templates
2019-02-10 22:03:20 +01:00
Daniel García
79fdfd6524 Add missing url parameter 2019-02-10 21:40:20 +01:00
Daniel García
d086a99e5b Implemented HTML emails with text alternative 2019-02-10 19:12:34 +01:00
TheMardy
22b0b95209 Added HTML templates (+14 squashed commit)
Squashed commit:

[ece2260] Plaintext send_org_invite

[01d4884] Plaintext pw_hint_some

[6ce5173] Plaintext pw_hint_none

[881af3e] Plaintext invite_confirmed

[ce78621] Plaintext invite_accepted

[13a44a4] Rename send_org_invite.hbs to send_org_invite.html.hbs

[b52bf2f] Rename pw_hint_some.hbs to pw_hint_some.html.hbs

[e0d1aeb] Rename pw_hint_none.hbs to pw_hint_none.html.hbs

[898dbcd] Rename invite_confirmed.hbs to invite_confirmed.html.hbs

[107af31] Rename invite_accepted.hbs to invite_accepted.html.hbs

[d26d662] Updated send_org_invite template

[71f47af] Updated pw_hint_some template

[c2ca3c2] Updated pw_hint_none template

[50f8bfb] Updated invite_accepted template

[17f96f8] Updated invite_confirmed template
2019-02-10 19:04:18 +01:00
Daniel García
28d1588e73 Show version in admin panel 2019-02-10 16:02:46 +01:00
Daniel García
f3b1a5ff3e Error when admin panel is disabled 2019-02-10 15:26:19 +01:00
Daniel García
330e90a6ac Hide secrets in config panel 2019-02-08 20:49:04 +01:00
133 changed files with 19813 additions and 2497 deletions

.dockerignore

@@ -9,10 +9,6 @@ data
 .idea
 *.iml

-# Git files
-.git
-.gitignore
-
 # Documentation
 *.md

.env.template

@@ -4,8 +4,13 @@
 ## Main data folder
 # DATA_FOLDER=data
-## Individual folders, these override %DATA_FOLDER%
+## Database URL
+## When using SQLite, this is the path to the DB file, default to %DATA_FOLDER%/db.sqlite3
+## When using MySQL, this it is the URL to the DB, including username and password:
+## Format: mysql://[user[:password]@]host/database_name
 # DATABASE_URL=data/db.sqlite3
+## Individual folders, these override %DATA_FOLDER%
 # RSA_KEY_FILENAME=data/rsa_key
 # ICON_CACHE_FOLDER=data/icon_cache
 # ATTACHMENTS_FOLDER=data/attachments
@@ -35,7 +40,7 @@
 ## Enable extended logging
 ## This shows timestamps and allows logging to file and to syslog
 ### To enable logging to file, use the LOG_FILE env variable
-### To enable syslog, you need to compile with `cargo build --features=enable_syslog'
+### To enable syslog, use the USE_SYSLOG env variable
 # EXTENDED_LOGGING=true

 ## Logging to file
@@ -43,12 +48,50 @@
 ## It's recommended to also set 'ROCKET_CLI_COLORS=off'
 # LOG_FILE=/path/to/log

+## Logging to Syslog
+## This requires extended logging
+## It's recommended to also set 'ROCKET_CLI_COLORS=off'
+# USE_SYSLOG=false
+
+## Log level
+## Change the verbosity of the log output
+## Valid values are "trace", "debug", "info", "warn", "error" and "off"
+## This requires extended logging
+# LOG_LEVEL=Info
+
+## Enable WAL for the DB
+## Set to false to avoid enabling WAL during startup.
+## Note that if the DB already has WAL enabled, you will also need to disable WAL in the DB,
+## this setting only prevents bitwarden_rs from automatically enabling it on start.
+## Please read project wiki page about this setting first before changing the value as it can
+## cause performance degradation or might render the service unable to start.
+# ENABLE_DB_WAL=true
+
 ## Disable icon downloading
 ## Set to true to disable icon downloading, this would still serve icons from $ICON_CACHE_FOLDER,
 ## but it won't produce any external network request. Needs to set $ICON_CACHE_TTL to 0,
 ## otherwise it will delete them and they won't be downloaded again.
 # DISABLE_ICON_DOWNLOAD=false

+## Icon download timeout
+## Configure the timeout value when downloading the favicons.
+## The default is 10 seconds, but this could be to low on slower network connections
+# ICON_DOWNLOAD_TIMEOUT=10
+
+## Icon blacklist Regex
+## Any domains or IPs that match this regex won't be fetched by the icon service.
+## Useful to hide other servers in the local network. Check the WIKI for more details
+# ICON_BLACKLIST_REGEX=192\.168\.1\.[0-9].*^
+
+## Any IP which is not defined as a global IP will be blacklisted.
+## Usefull to secure your internal environment: See https://en.wikipedia.org/wiki/Reserved_IP_addresses for a list of IPs which it will block
+# ICON_BLACKLIST_NON_GLOBAL_IPS=true
+
+## Disable 2FA remember
+## Enabling this would force the users to use a second factor to login every time.
+## Note that the checkbox would still be present, but ignored.
+# DISABLE_2FA_REMEMBER=false
+
 ## Controls if new users can register
 # SIGNUPS_ALLOWED=true
@@ -56,6 +99,7 @@
 ## One option is to use 'openssl rand -base64 48'
 ## If not set, the admin panel is disabled
 # ADMIN_TOKEN=Vy2VyYTTsKPv8W5aEOWUbB/Bt3DEKePbHmI4m9VcemUMS2rEviDowNAFqYi1xjmp
+# DISABLE_ADMIN_TOKEN=false

 ## Invitations org admins to invite users, even when signups are disabled
 # INVITATIONS_ALLOWED=true
@@ -82,6 +126,17 @@
 # YUBICO_SECRET_KEY=AAAAAAAAAAAAAAAAAAAAAAAA
 # YUBICO_SERVER=http://yourdomain.com/wsapi/2.0/verify

+## Duo Settings
+## You need to configure all options to enable global Duo support, otherwise users would need to configure it themselves
+## Create an account and protect an application as mentioned in this link (only the first step, not the rest):
+## https://help.bitwarden.com/article/setup-two-step-login-duo/#create-a-duo-security-account
+## Then set the following options, based on the values obtained from the last step:
+# DUO_IKEY=<Integration Key>
+# DUO_SKEY=<Secret Key>
+# DUO_HOST=<API Hostname>
+## After that, you should be able to follow the rest of the guide linked above,
+## ignoring the fields that ask for the values that you already configured beforehand.
+
 ## Rocket specific settings, check Rocket documentation to learn more
 # ROCKET_ENV=staging
 # ROCKET_ADDRESS=0.0.0.0 # Enable this to test mobile app
@@ -97,4 +152,5 @@
 # SMTP_PORT=587
 # SMTP_SSL=true
 # SMTP_USERNAME=username
 # SMTP_PASSWORD=password
+# SMTP_AUTH_MECHANISM="Plain"

.hadolint.yaml Normal file

@@ -0,0 +1,7 @@
ignored:
  # disable explicit version for apt install
  - DL3008
  # disable explicit version for apk install
  - DL3018
trustedRegistries:
  - docker.io

.travis.yml

@@ -1,9 +1,20 @@
-# Copied from Rocket's .travis.yml
+dist: xenial
+
+env:
+  global:
+  - HADOLINT_VERSION=1.17.1
+
 language: rust
-sudo: required # so we get a VM with higher specs
-dist: trusty # so we get a VM with higher specs
+rust: nightly
 cache: cargo
-rust:
-- nightly
+
+before_install:
+- sudo curl -L https://github.com/hadolint/hadolint/releases/download/v$HADOLINT_VERSION/hadolint-$(uname -s)-$(uname -m) -o /usr/local/bin/hadolint
+- sudo chmod +rx /usr/local/bin/hadolint
+
+# Nothing to install
+install: true
 script:
-- cargo build --verbose --all-features
+- git ls-files --exclude='Dockerfile*' --ignored | xargs --max-lines=1 hadolint
+- cargo build --features "sqlite"
+- cargo build --features "mysql"

Cargo.lock generated

File diff suppressed because it is too large

Cargo.toml

@@ -11,53 +11,59 @@ publish = false
 build = "build.rs"

 [features]
-enable_syslog = ["syslog", "fern/syslog-4"]
+# Empty to keep compatibility, prefer to set USE_SYSLOG=true
+enable_syslog = []
+mysql = ["diesel/mysql", "diesel_migrations/mysql"]
+postgresql = ["diesel/postgres", "diesel_migrations/postgres", "openssl"]
+sqlite = ["diesel/sqlite", "diesel_migrations/sqlite", "libsqlite3-sys"]
+
+[target."cfg(not(windows))".dependencies]
+syslog = "4.0.1"

 [dependencies]
 # Web framework for nightly with a focus on ease-of-use, expressibility, and speed.
-rocket = { version = "0.4.0", features = ["tls"], default-features = false }
-rocket_contrib = "0.4.0"
+rocket = { version = "0.5.0-dev", features = ["tls"], default-features = false }
+rocket_contrib = "0.5.0-dev"

 # HTTP client
-reqwest = "0.9.9"
+reqwest = "0.9.20"

 # multipart/form-data support
-multipart = "0.16.1"
+multipart = { version = "0.16.1", features = ["server"], default-features = false }

 # WebSockets library
-ws = "0.7.9"
+ws = "0.9.0"

 # MessagePack library
-rmpv = "0.4.0"
+rmpv = "0.4.1"

 # Concurrent hashmap implementation
-chashmap = "2.2.0"
+chashmap = "2.2.2"

 # A generic serialization/deserialization framework
-serde = "1.0.85"
-serde_derive = "1.0.85"
-serde_json = "1.0.37"
+serde = "1.0.101"
+serde_derive = "1.0.101"
+serde_json = "1.0.40"

 # Logging
-log = "0.4.6"
-fern = "0.5.7"
-syslog = { version = "4.0.1", optional = true }
+log = "0.4.8"
+fern = { version = "0.5.8", features = ["syslog-4"] }

 # A safe, extensible ORM and Query builder
-diesel = { version = "1.4.1", features = ["sqlite", "chrono", "r2d2"] }
-diesel_migrations = { version = "1.4.0", features = ["sqlite"] }
+diesel = { version = "1.4.2", features = [ "chrono", "r2d2"] }
+diesel_migrations = "1.4.0"

 # Bundled SQLite
-libsqlite3-sys = { version = "0.12.0", features = ["bundled"] }
+libsqlite3-sys = { version = "0.12.0", features = ["bundled"], optional = true }

 # Crypto library
-ring = { version = "0.13.5", features = ["rsa_signing"] }
+ring = "0.14.6"

 # UUID generation
-uuid = { version = "0.7.2", features = ["v4"] }
+uuid = { version = "0.7.4", features = ["v4"] }

 # Date and time library for Rust
-chrono = "0.4.6"
+chrono = "0.4.9"

 # TOTP library
 oath = "0.10.2"
@@ -66,44 +72,50 @@ oath = "0.10.2"
 data-encoding = "2.1.2"

 # JWT library
-jsonwebtoken = "5.0.1"
+jsonwebtoken = "6.0.1"

 # U2F library
-u2f = "0.1.4"
+u2f = "0.1.6"

 # Yubico Library
-yubico = { version = "0.5.1", features = ["online"], default-features = false }
+yubico = { version = "0.6.1", features = ["online", "online-tokio"], default-features = false }

 # A `dotenv` implementation for Rust
-dotenv = { version = "0.13.0", default-features = false }
+dotenv = { version = "0.14.1", default-features = false }

 # Lazy static macro
-lazy_static = { version = "1.2.0", features = ["nightly"] }
+lazy_static = "1.4.0"

 # More derives
-derive_more = "0.13.0"
+derive_more = "0.15.0"

 # Numerical libraries
-num-traits = "0.2.6"
-num-derive = "0.2.4"
+num-traits = "0.2.8"
+num-derive = "0.2.5"

 # Email libraries
-lettre = "0.9.0"
-lettre_email = "0.9.0"
-native-tls = "0.2.2"
+lettre = "0.9.2"
+lettre_email = "0.9.2"
+native-tls = "0.2.3"
+quoted_printable = "0.4.1"

 # Template library
-handlebars = "1.1.0"
+handlebars = "2.0.2"

 # For favicon extraction from main website
-soup = "0.3.0"
-regex = "1.1.0"
+soup = "0.4.1"
+regex = "1.3.1"
+
+# Required for SSL support for PostgreSQL
+openssl = { version = "0.10.24", optional = true }
+
+# URL encoding library
+percent-encoding = "2.1.0"

 [patch.crates-io]
 # Add support for Timestamp type
-rmp = { git = 'https://github.com/dani-garcia/msgpack-rust' }
+rmp = { git = 'https://github.com/3Hren/msgpack-rust', rev = 'd6c6c672e470341207ed9feb69b56322b5597a11' }

-# Use new native_tls version 0.2
-lettre = { git = 'https://github.com/lettre/lettre', rev = 'c988b1760ad81' }
-lettre_email = { git = 'https://github.com/lettre/lettre', rev = 'c988b1760ad81' }
+# Use newest ring
+rocket = { git = 'https://github.com/SergioBenitez/Rocket', rev = 'dbcb0a75b9556763ac3ab708f40c8f8ed75f1a1e' }
+rocket_contrib = { git = 'https://github.com/SergioBenitez/Rocket', rev = 'dbcb0a75b9556763ac3ab708f40c8f8ed75f1a1e' }

Dockerfile (deleted, replaced by the symlink below)

@@ -1,86 +0,0 @@
# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
FROM alpine as vault
ENV VAULT_VERSION "v2.8.0d"
ENV URL "https://github.com/dani-garcia/bw_web_builds/releases/download/$VAULT_VERSION/bw_web_$VAULT_VERSION.tar.gz"
RUN apk add --update-cache --upgrade \
curl \
tar
RUN mkdir /web-vault
WORKDIR /web-vault
RUN curl -L $URL | tar xz
RUN ls
########################## BUILD IMAGE ##########################
# We need to use the Rust build image, because
# we need the Rust compiler and Cargo tooling
FROM rust as build
# Using bundled SQLite, no need to install it
# RUN apt-get update && apt-get install -y\
# sqlite3\
# --no-install-recommends\
# && rm -rf /var/lib/apt/lists/*
# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin app
WORKDIR /app
# Copies over *only* your manifests and build files
COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs
# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN cargo build --release
RUN find . -not -path "./target*" -delete
# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .
# Make sure that we actually build the project
RUN touch src/main.rs
# Builds again, this time it'll just be
# your actual source files being built
RUN cargo build --release
######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM debian:stretch-slim
ENV ROCKET_ENV "staging"
ENV ROCKET_PORT=80
ENV ROCKET_WORKERS=10
# Install needed libraries
RUN apt-get update && apt-get install -y\
openssl\
ca-certificates\
--no-install-recommends\
&& rm -rf /var/lib/apt/lists/*
RUN mkdir /data
VOLUME /data
EXPOSE 80
EXPOSE 3012
# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
COPY Rocket.toml .
COPY --from=vault /web-vault ./web-vault
COPY --from=build app/target/release/bitwarden_rs .
# Configures the startup!
CMD ./bitwarden_rs

Dockerfile Symbolic link

@@ -0,0 +1 @@
docker/amd64/sqlite/Dockerfile

README.md

@@ -3,7 +3,7 @@
 ---

 [![Travis Build Status](https://travis-ci.org/dani-garcia/bitwarden_rs.svg?branch=master)](https://travis-ci.org/dani-garcia/bitwarden_rs)
-[![Docker Pulls](https://img.shields.io/docker/pulls/mprasil/bitwarden.svg)](https://hub.docker.com/r/mprasil/bitwarden)
+[![Docker Pulls](https://img.shields.io/docker/pulls/bitwardenrs/server.svg)](https://hub.docker.com/r/bitwardenrs/server)
 [![Dependency Status](https://deps.rs/repo/github/dani-garcia/bitwarden_rs/status.svg)](https://deps.rs/repo/github/dani-garcia/bitwarden_rs)
 [![GitHub Release](https://img.shields.io/github/release/dani-garcia/bitwarden_rs.svg)](https://github.com/dani-garcia/bitwarden_rs/releases/latest)
 [![GPL-3.0 Licensed](https://img.shields.io/github/license/dani-garcia/bitwarden_rs.svg)](https://github.com/dani-garcia/bitwarden_rs/blob/master/LICENSE.txt)
@@ -34,8 +34,8 @@ Basically full implementation of Bitwarden API is provided including:
 Pull the docker image and mount a volume from the host for persistent storage:

 ```sh
-docker pull mprasil/bitwarden:latest
-docker run -d --name bitwarden -v /bw-data/:/data/ -p 80:80 mprasil/bitwarden:latest
+docker pull bitwardenrs/server:latest
+docker run -d --name bitwarden -v /bw-data/:/data/ -p 80:80 bitwardenrs/server:latest
 ```

 This will preserve any persistent data under /bw-data/, you can adapt the path to whatever suits you.

azure-pipelines.yml Normal file

@@ -0,0 +1,25 @@
pool:
  vmImage: 'Ubuntu-16.04'

steps:
- script: |
    ls -la
    curl https://sh.rustup.rs -sSf | sh -s -- -y --default-toolchain $(cat rust-toolchain)
    echo "##vso[task.prependpath]$HOME/.cargo/bin"
  displayName: 'Install Rust'

- script: |
    sudo apt-get update
    sudo apt-get install -y libmysql++-dev
  displayName: Install libmysql

- script: |
    rustc -Vv
    cargo -V
  displayName: Query rust and cargo versions

- script : cargo build --features "sqlite"
  displayName: 'Build project with sqlite backend'

- script : cargo build --features "mysql"
  displayName: 'Build project with mysql backend'

build.rs

@@ -1,11 +1,25 @@
 use std::process::Command;

 fn main() {
+    #[cfg(all(feature = "sqlite", feature = "mysql"))]
+    compile_error!("Can't enable both sqlite and mysql at the same time");
+    #[cfg(all(feature = "sqlite", feature = "postgresql"))]
+    compile_error!("Can't enable both sqlite and postgresql at the same time");
+    #[cfg(all(feature = "mysql", feature = "postgresql"))]
+    compile_error!("Can't enable both mysql and postgresql at the same time");
+
+    #[cfg(not(any(feature = "sqlite", feature = "mysql", feature = "postgresql")))]
+    compile_error!("You need to enable one DB backend. To build with previous defaults do: cargo build --features sqlite");
+
     read_git_info().ok();
 }

 fn run(args: &[&str]) -> Result<String, std::io::Error> {
     let out = Command::new(args[0]).args(&args[1..]).output()?;
+    if !out.status.success() {
+        use std::io::{Error, ErrorKind};
+        return Err(Error::new(ErrorKind::Other, "Command not successful"));
+    }
     Ok(String::from_utf8(out.stdout).unwrap().trim().to_string())
 }

@@ -13,8 +27,10 @@ fn run(args: &[&str]) -> Result<String, std::io::Error> {
 fn read_git_info() -> Result<(), std::io::Error> {
     // The exact tag for the current commit, can be empty when
     // the current commit doesn't have an associated tag
-    let exact_tag = run(&["git", "describe", "--abbrev=0", "--tags", "--exact-match"])?;
-    println!("cargo:rustc-env=GIT_EXACT_TAG={}", exact_tag);
+    let exact_tag = run(&["git", "describe", "--abbrev=0", "--tags", "--exact-match"]).ok();
+    if let Some(ref exact) = exact_tag {
+        println!("cargo:rustc-env=GIT_EXACT_TAG={}", exact);
+    }

     // The last available tag, equal to exact_tag when
     // the current commit is tagged
@@ -27,13 +43,25 @@ fn read_git_info() -> Result<(), std::io::Error> {
     // The current git commit hash
     let rev = run(&["git", "rev-parse", "HEAD"])?;
-    let rev_short = rev.get(..12).unwrap_or_default();
+    let rev_short = rev.get(..8).unwrap_or_default();
     println!("cargo:rustc-env=GIT_REV={}", rev_short);

+    // Combined version
+    let version = if let Some(exact) = exact_tag {
+        exact
+    } else if &branch != "master" {
+        format!("{}-{} ({})", last_tag, rev_short, branch)
+    } else {
+        format!("{}-{}", last_tag, rev_short)
+    };
+    println!("cargo:rustc-env=GIT_VERSION={}", version);
+
     // To access these values, use:
     //    env!("GIT_EXACT_TAG")
     //    env!("GIT_LAST_TAG")
     //    env!("GIT_BRANCH")
     //    env!("GIT_REV")
+    //    env!("GIT_VERSION")

     Ok(())
 }
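
Values exported with `cargo:rustc-env` in build.rs become compile-time environment variables inside the crate; a sketch of the consuming side (the accessor name is an assumption):

```rust
// Hypothetical sketch: GIT_VERSION comes from build.rs, so it can be
// absent when building outside a git checkout; option_env! degrades
// gracefully where env! would fail to compile.
fn version_string() -> String {
    option_env!("GIT_VERSION").unwrap_or("unknown").to_string()
}
```
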


@@ -2,29 +2,35 @@
 # https://docs.docker.com/develop/develop-images/multistage-build/
 # https://whitfin.io/speeding-up-rust-docker-builds/
 ####################### VAULT BUILD IMAGE #######################
-FROM alpine as vault
-ENV VAULT_VERSION "v2.8.0d"
+FROM alpine:3.10 as vault
+ENV VAULT_VERSION "v2.12.0"
 ENV URL "https://github.com/dani-garcia/bw_web_builds/releases/download/$VAULT_VERSION/bw_web_$VAULT_VERSION.tar.gz"
-RUN apk add --update-cache --upgrade \
+RUN apk add --no-cache --upgrade \
     curl \
     tar
 RUN mkdir /web-vault
 WORKDIR /web-vault
+SHELL ["/bin/ash", "-eo", "pipefail", "-c"]
 RUN curl -L $URL | tar xz
 RUN ls
 ########################## BUILD IMAGE ##########################
 # We need to use the Rust build image, because
 # we need the Rust compiler and Cargo tooling
-FROM rust as build
+FROM rust:1.36 as build
+
+# set mysql backend
+ARG DB=mysql
 RUN apt-get update \
     && apt-get install -y \
+        --no-install-recommends \
         gcc-aarch64-linux-gnu \
     && mkdir -p ~/.cargo \
     && echo '[target.aarch64-unknown-linux-gnu]' >> ~/.cargo/config \
@@ -41,8 +47,10 @@ RUN sed 's/^deb/deb-src/' /etc/apt/sources.list > \
     && dpkg --add-architecture arm64 \
     && apt-get update \
     && apt-get install -y \
+        --no-install-recommends \
         libssl-dev:arm64 \
-        libc6-dev:arm64
+        libc6-dev:arm64 \
+        libmariadb-dev:arm64
 ENV CC_aarch64_unknown_linux_gnu="/usr/bin/aarch64-linux-gnu-gcc"
 ENV CROSS_COMPILE="1"
@@ -55,7 +63,7 @@ COPY . .
 # Build
 RUN rustup target add aarch64-unknown-linux-gnu
-RUN cargo build --release --target=aarch64-unknown-linux-gnu -v
+RUN cargo build --features ${DB} --release --target=aarch64-unknown-linux-gnu -v
 ######################## RUNTIME IMAGE ########################
 # Create a new stage with a minimal image
@@ -69,10 +77,12 @@ ENV ROCKET_WORKERS=10
 RUN [ "cross-build-start" ]
 # Install needed libraries
-RUN apt-get update && apt-get install -y\
-    openssl\
-    ca-certificates\
-    --no-install-recommends\
+RUN apt-get update && apt-get install -y \
+    --no-install-recommends \
+    openssl \
+    ca-certificates \
+    curl \
+    libmariadbclient-dev \
     && rm -rf /var/lib/apt/lists/*
 RUN mkdir /data
@@ -88,5 +98,9 @@ COPY Rocket.toml .
 COPY --from=vault /web-vault ./web-vault
 COPY --from=build /app/target/aarch64-unknown-linux-gnu/release/bitwarden_rs .
+COPY docker/healthcheck.sh ./healthcheck.sh
+HEALTHCHECK --interval=30s --timeout=3s CMD sh healthcheck.sh || exit 1
 # Configures the startup!
-CMD ./bitwarden_rs
+CMD ["./bitwarden_rs"]


@@ -0,0 +1,106 @@
# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
FROM alpine:3.10 as vault
ENV VAULT_VERSION "v2.12.0"
ENV URL "https://github.com/dani-garcia/bw_web_builds/releases/download/$VAULT_VERSION/bw_web_$VAULT_VERSION.tar.gz"
RUN apk add --no-cache --upgrade \
curl \
tar
RUN mkdir /web-vault
WORKDIR /web-vault
SHELL ["/bin/ash", "-eo", "pipefail", "-c"]
RUN curl -L $URL | tar xz
RUN ls
########################## BUILD IMAGE ##########################
# We need to use the Rust build image, because
# we need the Rust compiler and Cargo tooling
FROM rust:1.36 as build
# set sqlite as default for DB ARG for backward comaptibility
ARG DB=sqlite
RUN apt-get update \
&& apt-get install -y \
--no-install-recommends \
gcc-aarch64-linux-gnu \
&& mkdir -p ~/.cargo \
&& echo '[target.aarch64-unknown-linux-gnu]' >> ~/.cargo/config \
&& echo 'linker = "aarch64-linux-gnu-gcc"' >> ~/.cargo/config
ENV CARGO_HOME "/root/.cargo"
ENV USER "root"
WORKDIR /app
# Prepare openssl arm64 libs
RUN sed 's/^deb/deb-src/' /etc/apt/sources.list > \
/etc/apt/sources.list.d/deb-src.list \
&& dpkg --add-architecture arm64 \
&& apt-get update \
&& apt-get install -y \
--no-install-recommends \
libssl-dev:arm64 \
libc6-dev:arm64 \
libmariadb-dev:arm64
ENV CC_aarch64_unknown_linux_gnu="/usr/bin/aarch64-linux-gnu-gcc"
ENV CROSS_COMPILE="1"
ENV OPENSSL_INCLUDE_DIR="/usr/include/aarch64-linux-gnu"
ENV OPENSSL_LIB_DIR="/usr/lib/aarch64-linux-gnu"
# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .
# Build
RUN rustup target add aarch64-unknown-linux-gnu
RUN cargo build --features ${DB} --release --target=aarch64-unknown-linux-gnu -v
######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM balenalib/aarch64-debian:stretch
ENV ROCKET_ENV "staging"
ENV ROCKET_PORT=80
ENV ROCKET_WORKERS=10
RUN [ "cross-build-start" ]
# Install needed libraries
RUN apt-get update && apt-get install -y \
--no-install-recommends \
openssl \
ca-certificates \
curl \
sqlite3 \
&& rm -rf /var/lib/apt/lists/*
RUN mkdir /data
RUN [ "cross-build-end" ]
VOLUME /data
EXPOSE 80
# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
COPY Rocket.toml .
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/aarch64-unknown-linux-gnu/release/bitwarden_rs .
COPY docker/healthcheck.sh ./healthcheck.sh
HEALTHCHECK --interval=30s --timeout=3s CMD sh healthcheck.sh || exit 1
# Configures the startup!
CMD ["./bitwarden_rs"]


@@ -0,0 +1,103 @@
# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
FROM alpine:3.10 as vault
ENV VAULT_VERSION "v2.12.0"
ENV URL "https://github.com/dani-garcia/bw_web_builds/releases/download/$VAULT_VERSION/bw_web_$VAULT_VERSION.tar.gz"
RUN apk add --no-cache --upgrade \
curl \
tar
RUN mkdir /web-vault
WORKDIR /web-vault
SHELL ["/bin/ash", "-eo", "pipefail", "-c"]
RUN curl -L $URL | tar xz
RUN ls
########################## BUILD IMAGE ##########################
# We need to use the Rust build image, because
# we need the Rust compiler and Cargo tooling
FROM rust:1.36 as build
# set mysql backend
ARG DB=mysql
# Using bundled SQLite, no need to install it
# RUN apt-get update && apt-get install -y\
# --no-install-recommends \
# sqlite3\
# && rm -rf /var/lib/apt/lists/*
# Install MySQL package
RUN apt-get update && apt-get install -y \
--no-install-recommends \
libmariadb-dev \
&& rm -rf /var/lib/apt/lists/*
# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin app
WORKDIR /app
# Copies over *only* your manifests and build files
COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs
# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN cargo build --features ${DB} --release
RUN find . -not -path "./target*" -delete
# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .
# Make sure that we actually build the project
RUN touch src/main.rs
# Builds again, this time it'll just be
# your actual source files being built
RUN cargo build --features ${DB} --release
######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM debian:stretch-slim
ENV ROCKET_ENV "staging"
ENV ROCKET_PORT=80
ENV ROCKET_WORKERS=10
# Install needed libraries
RUN apt-get update && apt-get install -y \
--no-install-recommends \
openssl \
ca-certificates \
curl \
libmariadbclient-dev \
&& rm -rf /var/lib/apt/lists/*
RUN mkdir /data
VOLUME /data
EXPOSE 80
EXPOSE 3012
# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
COPY Rocket.toml .
COPY --from=vault /web-vault ./web-vault
COPY --from=build app/target/release/bitwarden_rs .
COPY docker/healthcheck.sh ./healthcheck.sh
HEALTHCHECK --interval=30s --timeout=3s CMD sh healthcheck.sh || exit 1
# Configures the startup!
CMD ["./bitwarden_rs"]


@@ -2,28 +2,39 @@
 # https://docs.docker.com/develop/develop-images/multistage-build/
 # https://whitfin.io/speeding-up-rust-docker-builds/
 ####################### VAULT BUILD IMAGE #######################
-FROM alpine as vault
-ENV VAULT_VERSION "v2.8.0d"
+FROM alpine:3.10 as vault
+ENV VAULT_VERSION "v2.12.0"
 ENV URL "https://github.com/dani-garcia/bw_web_builds/releases/download/$VAULT_VERSION/bw_web_$VAULT_VERSION.tar.gz"
-RUN apk add --update-cache --upgrade \
+RUN apk add --no-cache --upgrade \
     curl \
     tar
 RUN mkdir /web-vault
 WORKDIR /web-vault
+SHELL ["/bin/ash", "-eo", "pipefail", "-c"]
 RUN curl -L $URL | tar xz
 RUN ls
 ########################## BUILD IMAGE ##########################
 # Musl build image for statically compiled binary
-FROM clux/muslrust:nightly-2018-12-01 as build
+FROM clux/muslrust:nightly-2019-07-08 as build
+
+# set mysql backend
+ARG DB=mysql
 ENV USER "root"
+
+# Install needed libraries
+RUN apt-get update && apt-get install -y \
+    --no-install-recommends \
+    libmysqlclient-dev \
+    && rm -rf /var/lib/apt/lists/*
 WORKDIR /app
 # Copies the complete project
@@ -32,13 +43,16 @@ COPY . .
 RUN rustup target add x86_64-unknown-linux-musl
+# Make sure that we actually build the project
+RUN touch src/main.rs
 # Build
-RUN cargo build --release
+RUN cargo build --features ${DB} --release
 ######################## RUNTIME IMAGE ########################
 # Create a new stage with a minimal image
 # because we already have a binary built
-FROM alpine:3.8
+FROM alpine:3.10
 ENV ROCKET_ENV "staging"
 ENV ROCKET_PORT=80
@@ -46,10 +60,11 @@ ENV ROCKET_WORKERS=10
 ENV SSL_CERT_DIR=/etc/ssl/certs
 # Install needed libraries
-RUN apk add \
-    openssl\
-    ca-certificates \
-    && rm /var/cache/apk/*
+RUN apk add --no-cache \
+    openssl \
+    mariadb-connector-c \
+    curl \
+    ca-certificates
 RUN mkdir /data
 VOLUME /data
@@ -62,5 +77,9 @@ COPY Rocket.toml .
 COPY --from=vault /web-vault ./web-vault
 COPY --from=build /app/target/x86_64-unknown-linux-musl/release/bitwarden_rs .
+COPY docker/healthcheck.sh ./healthcheck.sh
+HEALTHCHECK --interval=30s --timeout=3s CMD sh healthcheck.sh || exit 1
 # Configures the startup!
-CMD ./bitwarden_rs
+CMD ["./bitwarden_rs"]


@@ -0,0 +1,104 @@
# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
FROM alpine:3.10 as vault
ENV VAULT_VERSION "v2.12.0"
ENV URL "https://github.com/dani-garcia/bw_web_builds/releases/download/$VAULT_VERSION/bw_web_$VAULT_VERSION.tar.gz"
RUN apk add --no-cache --upgrade \
curl \
tar
RUN mkdir /web-vault
WORKDIR /web-vault
SHELL ["/bin/ash", "-eo", "pipefail", "-c"]
RUN curl -L $URL | tar xz
RUN ls
########################## BUILD IMAGE ##########################
# We need to use the Rust build image, because
# we need the Rust compiler and Cargo tooling
FROM rust:1.36 as build
# set postgresql backend
ARG DB=postgresql
# Using bundled SQLite, no need to install it
# RUN apt-get update && apt-get install -y\
# --no-install-recommends \
# sqlite3\
# && rm -rf /var/lib/apt/lists/*
# Install PostgreSQL package
RUN apt-get update && apt-get install -y \
--no-install-recommends \
libpq-dev \
&& rm -rf /var/lib/apt/lists/*
# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin app
WORKDIR /app
# Copies over *only* your manifests and build files
COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs
# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN cargo build --features ${DB} --release
RUN find . -not -path "./target*" -delete
# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .
# Make sure that we actually build the project
RUN touch src/main.rs
# Builds again, this time it'll just be
# your actual source files being built
RUN cargo build --features ${DB} --release
######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM debian:stretch-slim
ENV ROCKET_ENV "staging"
ENV ROCKET_PORT=80
ENV ROCKET_WORKERS=10
# Install needed libraries
RUN apt-get update && apt-get install -y \
--no-install-recommends \
openssl \
ca-certificates \
curl \
sqlite3 \
libpq5 \
&& rm -rf /var/lib/apt/lists/*
RUN mkdir /data
VOLUME /data
EXPOSE 80
EXPOSE 3012
# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
COPY Rocket.toml .
COPY --from=vault /web-vault ./web-vault
COPY --from=build app/target/release/bitwarden_rs .
COPY docker/healthcheck.sh ./healthcheck.sh
HEALTHCHECK --interval=30s --timeout=3s CMD sh healthcheck.sh || exit 1
# Configures the startup!
CMD ["./bitwarden_rs"]


@@ -0,0 +1,86 @@
# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
FROM alpine:3.10 as vault
ENV VAULT_VERSION "v2.12.0"
ENV URL "https://github.com/dani-garcia/bw_web_builds/releases/download/$VAULT_VERSION/bw_web_$VAULT_VERSION.tar.gz"
RUN apk add --no-cache --upgrade \
curl \
tar
RUN mkdir /web-vault
WORKDIR /web-vault
SHELL ["/bin/ash", "-eo", "pipefail", "-c"]
RUN curl -L $URL | tar xz
RUN ls
########################## BUILD IMAGE ##########################
# Musl build image for statically compiled binary
FROM clux/muslrust:nightly-2019-07-08 as build
# set postgresql backend
ARG DB=postgresql
ENV USER "root"
# Install needed libraries
RUN apt-get update && apt-get install -y \
--no-install-recommends \
libpq-dev \
&& rm -rf /var/lib/apt/lists/*
WORKDIR /app
# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .
RUN rustup target add x86_64-unknown-linux-musl
# Make sure that we actually build the project
RUN touch src/main.rs
# Build
RUN cargo build --features ${DB} --release
######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM alpine:3.10
ENV ROCKET_ENV "staging"
ENV ROCKET_PORT=80
ENV ROCKET_WORKERS=10
ENV SSL_CERT_DIR=/etc/ssl/certs
# Install needed libraries
RUN apk add --no-cache \
openssl \
postgresql-libs \
curl \
sqlite \
ca-certificates
RUN mkdir /data
VOLUME /data
EXPOSE 80
EXPOSE 3012
# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
COPY Rocket.toml .
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/x86_64-unknown-linux-musl/release/bitwarden_rs .
COPY docker/healthcheck.sh ./healthcheck.sh
HEALTHCHECK --interval=30s --timeout=3s CMD sh healthcheck.sh || exit 1
# Configures the startup!
CMD ["./bitwarden_rs"]


@@ -0,0 +1,103 @@
# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
FROM alpine:3.10 as vault
ENV VAULT_VERSION "v2.12.0"
ENV URL "https://github.com/dani-garcia/bw_web_builds/releases/download/$VAULT_VERSION/bw_web_$VAULT_VERSION.tar.gz"
RUN apk add --no-cache --upgrade \
curl \
tar
RUN mkdir /web-vault
WORKDIR /web-vault
SHELL ["/bin/ash", "-eo", "pipefail", "-c"]
RUN curl -L $URL | tar xz
RUN ls
########################## BUILD IMAGE ##########################
# We need to use the Rust build image, because
# we need the Rust compiler and Cargo tooling
FROM rust:1.36 as build
# set sqlite as default for the DB ARG for backward compatibility
ARG DB=sqlite
# Using bundled SQLite, no need to install it
# RUN apt-get update && apt-get install -y\
# --no-install-recommends \
# sqlite3 \
# && rm -rf /var/lib/apt/lists/*
# Install MariaDB package
RUN apt-get update && apt-get install -y \
--no-install-recommends \
libmariadb-dev \
&& rm -rf /var/lib/apt/lists/*
# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin app
WORKDIR /app
# Copies over *only* your manifests and build files
COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs
# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN cargo build --features ${DB} --release
RUN find . -not -path "./target*" -delete
# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .
# Make sure that we actually build the project
RUN touch src/main.rs
# Builds again, this time it'll just be
# your actual source files being built
RUN cargo build --features ${DB} --release
######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM debian:stretch-slim
ENV ROCKET_ENV "staging"
ENV ROCKET_PORT=80
ENV ROCKET_WORKERS=10
# Install needed libraries
RUN apt-get update && apt-get install -y \
--no-install-recommends \
openssl \
ca-certificates \
curl \
sqlite3 \
&& rm -rf /var/lib/apt/lists/*
RUN mkdir /data
VOLUME /data
EXPOSE 80
EXPOSE 3012
# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
COPY Rocket.toml .
COPY --from=vault /web-vault ./web-vault
COPY --from=build app/target/release/bitwarden_rs .
COPY docker/healthcheck.sh ./healthcheck.sh
HEALTHCHECK --interval=30s --timeout=3s CMD sh healthcheck.sh || exit 1
# Configures the startup!
CMD ["./bitwarden_rs"]


@@ -0,0 +1,86 @@
# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
FROM alpine:3.10 as vault
ENV VAULT_VERSION "v2.12.0"
ENV URL "https://github.com/dani-garcia/bw_web_builds/releases/download/$VAULT_VERSION/bw_web_$VAULT_VERSION.tar.gz"
RUN apk add --no-cache --upgrade \
curl \
tar
RUN mkdir /web-vault
WORKDIR /web-vault
SHELL ["/bin/ash", "-eo", "pipefail", "-c"]
RUN curl -L $URL | tar xz
RUN ls
########################## BUILD IMAGE ##########################
# Musl build image for statically compiled binary
FROM clux/muslrust:nightly-2019-07-08 as build
# set sqlite as default for the DB ARG for backward compatibility
ARG DB=sqlite
ENV USER "root"
# Install needed libraries
RUN apt-get update && apt-get install -y \
--no-install-recommends \
libmysqlclient-dev \
&& rm -rf /var/lib/apt/lists/*
WORKDIR /app
# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .
RUN rustup target add x86_64-unknown-linux-musl
# Make sure that we actually build the project
RUN touch src/main.rs
# Build
RUN cargo build --features ${DB} --release
######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM alpine:3.10
ENV ROCKET_ENV "staging"
ENV ROCKET_PORT=80
ENV ROCKET_WORKERS=10
ENV SSL_CERT_DIR=/etc/ssl/certs
# Install needed libraries
RUN apk add --no-cache \
openssl \
curl \
sqlite \
ca-certificates
RUN mkdir /data
VOLUME /data
EXPOSE 80
EXPOSE 3012
# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
COPY Rocket.toml .
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/x86_64-unknown-linux-musl/release/bitwarden_rs .
COPY docker/healthcheck.sh ./healthcheck.sh
HEALTHCHECK --interval=30s --timeout=3s CMD sh healthcheck.sh || exit 1
# Configures the startup!
CMD ["./bitwarden_rs"]


@@ -0,0 +1,106 @@
# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
FROM alpine:3.10 as vault
ENV VAULT_VERSION "v2.12.0"
ENV URL "https://github.com/dani-garcia/bw_web_builds/releases/download/$VAULT_VERSION/bw_web_$VAULT_VERSION.tar.gz"
RUN apk add --no-cache --upgrade \
curl \
tar
RUN mkdir /web-vault
WORKDIR /web-vault
SHELL ["/bin/ash", "-eo", "pipefail", "-c"]
RUN curl -L $URL | tar xz
RUN ls
########################## BUILD IMAGE ##########################
# We need to use the Rust build image, because
# we need the Rust compiler and Cargo tooling
FROM rust:1.36 as build
# set mysql backend
ARG DB=mysql
RUN apt-get update \
&& apt-get install -y \
--no-install-recommends \
gcc-arm-linux-gnueabi \
&& mkdir -p ~/.cargo \
&& echo '[target.arm-unknown-linux-gnueabi]' >> ~/.cargo/config \
&& echo 'linker = "arm-linux-gnueabi-gcc"' >> ~/.cargo/config
ENV CARGO_HOME "/root/.cargo"
ENV USER "root"
WORKDIR /app
# Prepare openssl armel libs
RUN sed 's/^deb/deb-src/' /etc/apt/sources.list > \
/etc/apt/sources.list.d/deb-src.list \
&& dpkg --add-architecture armel \
&& apt-get update \
&& apt-get install -y \
--no-install-recommends \
libssl-dev:armel \
libc6-dev:armel \
libmariadb-dev:armel
ENV CC_arm_unknown_linux_gnueabi="/usr/bin/arm-linux-gnueabi-gcc"
ENV CROSS_COMPILE="1"
ENV OPENSSL_INCLUDE_DIR="/usr/include/arm-linux-gnueabi"
ENV OPENSSL_LIB_DIR="/usr/lib/arm-linux-gnueabi"
# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .
# Build
RUN rustup target add arm-unknown-linux-gnueabi
RUN cargo build --features ${DB} --release --target=arm-unknown-linux-gnueabi -v
######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM balenalib/rpi-debian:stretch
ENV ROCKET_ENV "staging"
ENV ROCKET_PORT=80
ENV ROCKET_WORKERS=10
RUN [ "cross-build-start" ]
# Install needed libraries
RUN apt-get update && apt-get install -y \
--no-install-recommends \
openssl \
ca-certificates \
curl \
libmariadbclient-dev \
&& rm -rf /var/lib/apt/lists/*
RUN mkdir /data
RUN [ "cross-build-end" ]
VOLUME /data
EXPOSE 80
# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
COPY Rocket.toml .
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/arm-unknown-linux-gnueabi/release/bitwarden_rs .
COPY docker/healthcheck.sh ./healthcheck.sh
HEALTHCHECK --interval=30s --timeout=3s CMD sh healthcheck.sh || exit 1
# Configures the startup!
CMD ["./bitwarden_rs"]


@@ -0,0 +1,106 @@
# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
FROM alpine:3.10 as vault
ENV VAULT_VERSION "v2.12.0"
ENV URL "https://github.com/dani-garcia/bw_web_builds/releases/download/$VAULT_VERSION/bw_web_$VAULT_VERSION.tar.gz"
RUN apk add --no-cache --upgrade \
curl \
tar
RUN mkdir /web-vault
WORKDIR /web-vault
SHELL ["/bin/ash", "-eo", "pipefail", "-c"]
RUN curl -L $URL | tar xz
RUN ls
########################## BUILD IMAGE ##########################
# We need to use the Rust build image, because
# we need the Rust compiler and Cargo tooling
FROM rust:1.36 as build
# set sqlite as default for the DB ARG for backward compatibility
ARG DB=sqlite
RUN apt-get update \
&& apt-get install -y \
--no-install-recommends \
gcc-arm-linux-gnueabi \
&& mkdir -p ~/.cargo \
&& echo '[target.arm-unknown-linux-gnueabi]' >> ~/.cargo/config \
&& echo 'linker = "arm-linux-gnueabi-gcc"' >> ~/.cargo/config
ENV CARGO_HOME "/root/.cargo"
ENV USER "root"
WORKDIR /app
# Prepare openssl armel libs
RUN sed 's/^deb/deb-src/' /etc/apt/sources.list > \
/etc/apt/sources.list.d/deb-src.list \
&& dpkg --add-architecture armel \
&& apt-get update \
&& apt-get install -y \
--no-install-recommends \
libssl-dev:armel \
libc6-dev:armel \
libmariadb-dev:armel
ENV CC_arm_unknown_linux_gnueabi="/usr/bin/arm-linux-gnueabi-gcc"
ENV CROSS_COMPILE="1"
ENV OPENSSL_INCLUDE_DIR="/usr/include/arm-linux-gnueabi"
ENV OPENSSL_LIB_DIR="/usr/lib/arm-linux-gnueabi"
# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .
# Build
RUN rustup target add arm-unknown-linux-gnueabi
RUN cargo build --features ${DB} --release --target=arm-unknown-linux-gnueabi -v
######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM balenalib/rpi-debian:stretch
ENV ROCKET_ENV "staging"
ENV ROCKET_PORT=80
ENV ROCKET_WORKERS=10
RUN [ "cross-build-start" ]
# Install needed libraries
RUN apt-get update && apt-get install -y \
--no-install-recommends \
openssl \
ca-certificates \
curl \
sqlite3 \
&& rm -rf /var/lib/apt/lists/*
RUN mkdir /data
RUN [ "cross-build-end" ]
VOLUME /data
EXPOSE 80
# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
COPY Rocket.toml .
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/arm-unknown-linux-gnueabi/release/bitwarden_rs .
COPY docker/healthcheck.sh ./healthcheck.sh
HEALTHCHECK --interval=30s --timeout=3s CMD sh healthcheck.sh || exit 1
# Configures the startup!
CMD ["./bitwarden_rs"]


@@ -2,29 +2,35 @@
 # https://docs.docker.com/develop/develop-images/multistage-build/
 # https://whitfin.io/speeding-up-rust-docker-builds/
 ####################### VAULT BUILD IMAGE #######################
-FROM alpine as vault
-ENV VAULT_VERSION "v2.8.0d"
+FROM alpine:3.10 as vault
+ENV VAULT_VERSION "v2.12.0"
 ENV URL "https://github.com/dani-garcia/bw_web_builds/releases/download/$VAULT_VERSION/bw_web_$VAULT_VERSION.tar.gz"
-RUN apk add --update-cache --upgrade \
+RUN apk add --no-cache --upgrade \
     curl \
     tar
 RUN mkdir /web-vault
 WORKDIR /web-vault
+SHELL ["/bin/ash", "-eo", "pipefail", "-c"]
 RUN curl -L $URL | tar xz
 RUN ls
 ########################## BUILD IMAGE ##########################
 # We need to use the Rust build image, because
 # we need the Rust compiler and Cargo tooling
-FROM rust as build
+FROM rust:1.36 as build
+# set mysql backend
+ARG DB=mysql
 RUN apt-get update \
     && apt-get install -y \
+    --no-install-recommends \
     gcc-arm-linux-gnueabihf \
     && mkdir -p ~/.cargo \
     && echo '[target.armv7-unknown-linux-gnueabihf]' >> ~/.cargo/config \
@@ -41,8 +47,11 @@ RUN sed 's/^deb/deb-src/' /etc/apt/sources.list > \
     && dpkg --add-architecture armhf \
     && apt-get update \
     && apt-get install -y \
+    --no-install-recommends \
     libssl-dev:armhf \
-    libc6-dev:armhf
+    libc6-dev:armhf \
+    libmariadb-dev:armhf
 ENV CC_armv7_unknown_linux_gnueabihf="/usr/bin/arm-linux-gnueabihf-gcc"
 ENV CROSS_COMPILE="1"
@@ -55,7 +64,7 @@ COPY . .
 # Build
 RUN rustup target add armv7-unknown-linux-gnueabihf
-RUN cargo build --release --target=armv7-unknown-linux-gnueabihf -v
+RUN cargo build --features ${DB} --release --target=armv7-unknown-linux-gnueabihf -v
 ######################## RUNTIME IMAGE ########################
 # Create a new stage with a minimal image
@@ -69,11 +78,13 @@ ENV ROCKET_WORKERS=10
 RUN [ "cross-build-start" ]
 # Install needed libraries
-RUN apt-get update && apt-get install -y\
-    openssl\
-    ca-certificates\
-    --no-install-recommends\
-    && rm -rf /var/lib/apt/lists/*
+RUN apt-get update && apt-get install -y \
+    --no-install-recommends \
+    openssl \
+    ca-certificates \
+    curl \
+    libmariadbclient-dev \
+    && rm -rf /var/lib/apt/lists/*
 RUN mkdir /data
@@ -88,5 +99,9 @@ COPY Rocket.toml .
 COPY --from=vault /web-vault ./web-vault
 COPY --from=build /app/target/armv7-unknown-linux-gnueabihf/release/bitwarden_rs .
+COPY docker/healthcheck.sh ./healthcheck.sh
+HEALTHCHECK --interval=30s --timeout=3s CMD sh healthcheck.sh || exit 1
 # Configures the startup!
-CMD ./bitwarden_rs
+CMD ["./bitwarden_rs"]

View File

@@ -0,0 +1,106 @@
# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
FROM alpine:3.10 as vault
ENV VAULT_VERSION "v2.12.0"
ENV URL "https://github.com/dani-garcia/bw_web_builds/releases/download/$VAULT_VERSION/bw_web_$VAULT_VERSION.tar.gz"
RUN apk add --no-cache --upgrade \
curl \
tar
RUN mkdir /web-vault
WORKDIR /web-vault
SHELL ["/bin/ash", "-eo", "pipefail", "-c"]
RUN curl -L $URL | tar xz
RUN ls
########################## BUILD IMAGE ##########################
# We need to use the Rust build image, because
# we need the Rust compiler and Cargo tooling
FROM rust:1.36 as build
# set sqlite as default for the DB ARG for backward compatibility
ARG DB=sqlite
RUN apt-get update \
&& apt-get install -y \
--no-install-recommends \
gcc-arm-linux-gnueabihf \
&& mkdir -p ~/.cargo \
&& echo '[target.armv7-unknown-linux-gnueabihf]' >> ~/.cargo/config \
&& echo 'linker = "arm-linux-gnueabihf-gcc"' >> ~/.cargo/config
ENV CARGO_HOME "/root/.cargo"
ENV USER "root"
WORKDIR /app
# Prepare openssl armhf libs
RUN sed 's/^deb/deb-src/' /etc/apt/sources.list > \
/etc/apt/sources.list.d/deb-src.list \
&& dpkg --add-architecture armhf \
&& apt-get update \
&& apt-get install -y \
--no-install-recommends \
libssl-dev:armhf \
libc6-dev:armhf \
libmariadb-dev:armhf
ENV CC_armv7_unknown_linux_gnueabihf="/usr/bin/arm-linux-gnueabihf-gcc"
ENV CROSS_COMPILE="1"
ENV OPENSSL_INCLUDE_DIR="/usr/include/arm-linux-gnueabihf"
ENV OPENSSL_LIB_DIR="/usr/lib/arm-linux-gnueabihf"
# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .
# Build
RUN rustup target add armv7-unknown-linux-gnueabihf
RUN cargo build --features ${DB} --release --target=armv7-unknown-linux-gnueabihf -v
######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM balenalib/armv7hf-debian:stretch
ENV ROCKET_ENV "staging"
ENV ROCKET_PORT=80
ENV ROCKET_WORKERS=10
RUN [ "cross-build-start" ]
# Install needed libraries
RUN apt-get update && apt-get install -y \
--no-install-recommends \
openssl \
ca-certificates \
curl \
sqlite3 \
&& rm -rf /var/lib/apt/lists/*
RUN mkdir /data
RUN [ "cross-build-end" ]
VOLUME /data
EXPOSE 80
# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
COPY Rocket.toml .
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/armv7-unknown-linux-gnueabihf/release/bitwarden_rs .
COPY docker/healthcheck.sh ./healthcheck.sh
HEALTHCHECK --interval=30s --timeout=3s CMD sh healthcheck.sh || exit 1
# Configures the startup!
CMD ["./bitwarden_rs"]

docker/healthcheck.sh

@@ -0,0 +1,8 @@
#!/usr/bin/env sh
if [ -z "$ROCKET_TLS" ]
then
    curl --fail http://localhost:${ROCKET_PORT:-"80"}/alive || exit 1
else
    curl --insecure --fail https://localhost:${ROCKET_PORT:-"80"}/alive || exit 1
fi
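
This is the script the HEALTHCHECK instructions above run every 30 seconds: it probes the /alive endpoint over plain HTTP, or over HTTPS (with --insecure, since self-signed certificates are allowed) when ROCKET_TLS is set. Note the original was missing the space before the closing ] in the test, fixed above. A quick sketch of reading the recorded result; the container name is a placeholder:

# Docker stores the latest health probe result in the container state
docker inspect --format '{{.State.Health.Status}}' bitwarden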


@@ -0,0 +1,62 @@
CREATE TABLE users (
uuid CHAR(36) NOT NULL PRIMARY KEY,
created_at DATETIME NOT NULL,
updated_at DATETIME NOT NULL,
email VARCHAR(255) NOT NULL UNIQUE,
name TEXT NOT NULL,
password_hash BLOB NOT NULL,
salt BLOB NOT NULL,
password_iterations INTEGER NOT NULL,
password_hint TEXT,
`key` TEXT NOT NULL,
private_key TEXT,
public_key TEXT,
totp_secret TEXT,
totp_recover TEXT,
security_stamp TEXT NOT NULL,
equivalent_domains TEXT NOT NULL,
excluded_globals TEXT NOT NULL
);
CREATE TABLE devices (
uuid CHAR(36) NOT NULL PRIMARY KEY,
created_at DATETIME NOT NULL,
updated_at DATETIME NOT NULL,
user_uuid CHAR(36) NOT NULL REFERENCES users (uuid),
name TEXT NOT NULL,
type INTEGER NOT NULL,
push_token TEXT,
refresh_token TEXT NOT NULL
);
CREATE TABLE ciphers (
uuid CHAR(36) NOT NULL PRIMARY KEY,
created_at DATETIME NOT NULL,
updated_at DATETIME NOT NULL,
user_uuid CHAR(36) NOT NULL REFERENCES users (uuid),
folder_uuid CHAR(36) REFERENCES folders (uuid),
organization_uuid CHAR(36),
type INTEGER NOT NULL,
name TEXT NOT NULL,
notes TEXT,
fields TEXT,
data TEXT NOT NULL,
favorite BOOLEAN NOT NULL
);
CREATE TABLE attachments (
id CHAR(36) NOT NULL PRIMARY KEY,
cipher_uuid CHAR(36) NOT NULL REFERENCES ciphers (uuid),
file_name TEXT NOT NULL,
file_size INTEGER NOT NULL
);
CREATE TABLE folders (
uuid CHAR(36) NOT NULL PRIMARY KEY,
created_at DATETIME NOT NULL,
updated_at DATETIME NOT NULL,
user_uuid CHAR(36) NOT NULL REFERENCES users (uuid),
name TEXT NOT NULL
);


@@ -0,0 +1,30 @@
CREATE TABLE collections (
uuid VARCHAR(40) NOT NULL PRIMARY KEY,
org_uuid VARCHAR(40) NOT NULL REFERENCES organizations (uuid),
name TEXT NOT NULL
);
CREATE TABLE organizations (
uuid VARCHAR(40) NOT NULL PRIMARY KEY,
name TEXT NOT NULL,
billing_email TEXT NOT NULL
);
CREATE TABLE users_collections (
user_uuid CHAR(36) NOT NULL REFERENCES users (uuid),
collection_uuid CHAR(36) NOT NULL REFERENCES collections (uuid),
PRIMARY KEY (user_uuid, collection_uuid)
);
CREATE TABLE users_organizations (
uuid CHAR(36) NOT NULL PRIMARY KEY,
user_uuid CHAR(36) NOT NULL REFERENCES users (uuid),
org_uuid CHAR(36) NOT NULL REFERENCES organizations (uuid),
access_all BOOLEAN NOT NULL,
`key` TEXT NOT NULL,
status INTEGER NOT NULL,
type INTEGER NOT NULL,
UNIQUE (user_uuid, org_uuid)
);


@@ -0,0 +1,34 @@
ALTER TABLE ciphers RENAME TO oldCiphers;
CREATE TABLE ciphers (
uuid CHAR(36) NOT NULL PRIMARY KEY,
created_at DATETIME NOT NULL,
updated_at DATETIME NOT NULL,
user_uuid CHAR(36) REFERENCES users (uuid), -- Make this optional
organization_uuid CHAR(36) REFERENCES organizations (uuid), -- Add reference to orgs table
-- Remove folder_uuid
type INTEGER NOT NULL,
name TEXT NOT NULL,
notes TEXT,
fields TEXT,
data TEXT NOT NULL,
favorite BOOLEAN NOT NULL
);
CREATE TABLE folders_ciphers (
cipher_uuid CHAR(36) NOT NULL REFERENCES ciphers (uuid),
folder_uuid CHAR(36) NOT NULL REFERENCES folders (uuid),
PRIMARY KEY (cipher_uuid, folder_uuid)
);
INSERT INTO ciphers (uuid, created_at, updated_at, user_uuid, organization_uuid, type, name, notes, fields, data, favorite)
SELECT uuid, created_at, updated_at, user_uuid, organization_uuid, type, name, notes, fields, data, favorite FROM oldCiphers;
INSERT INTO folders_ciphers (cipher_uuid, folder_uuid)
SELECT uuid, folder_uuid FROM oldCiphers WHERE folder_uuid IS NOT NULL;
DROP TABLE oldCiphers;
ALTER TABLE users_collections ADD COLUMN read_only BOOLEAN NOT NULL DEFAULT 0; -- False


@@ -0,0 +1,5 @@
CREATE TABLE ciphers_collections (
cipher_uuid CHAR(36) NOT NULL REFERENCES ciphers (uuid),
collection_uuid CHAR(36) NOT NULL REFERENCES collections (uuid),
PRIMARY KEY (cipher_uuid, collection_uuid)
);


@@ -0,0 +1,14 @@
ALTER TABLE attachments RENAME TO oldAttachments;
CREATE TABLE attachments (
id CHAR(36) NOT NULL PRIMARY KEY,
cipher_uuid CHAR(36) NOT NULL REFERENCES ciphers (uuid),
file_name TEXT NOT NULL,
file_size INTEGER NOT NULL
);
INSERT INTO attachments (id, cipher_uuid, file_name, file_size)
SELECT id, cipher_uuid, file_name, file_size FROM oldAttachments;
DROP TABLE oldAttachments;


@@ -0,0 +1,15 @@
CREATE TABLE twofactor (
uuid CHAR(36) NOT NULL PRIMARY KEY,
user_uuid CHAR(36) NOT NULL REFERENCES users (uuid),
type INTEGER NOT NULL,
enabled BOOLEAN NOT NULL,
data TEXT NOT NULL,
UNIQUE (user_uuid, type)
);
INSERT INTO twofactor (uuid, user_uuid, type, enabled, data)
SELECT UUID(), uuid, 0, 1, u.totp_secret FROM users u where u.totp_secret IS NOT NULL;
UPDATE users SET totp_secret = NULL; -- Instead of recreating the table, just leave the columns empty


@@ -0,0 +1,3 @@
CREATE TABLE invitations (
email VARCHAR(255) NOT NULL PRIMARY KEY
);


@@ -4,4 +4,4 @@ ALTER TABLE users
 ALTER TABLE users
 ADD COLUMN
-client_kdf_iter INTEGER NOT NULL DEFAULT 5000;
+client_kdf_iter INTEGER NOT NULL DEFAULT 100000;


@@ -0,0 +1,3 @@
ALTER TABLE attachments
ADD COLUMN
`key` TEXT;


@@ -0,0 +1,7 @@
ALTER TABLE attachments CHANGE COLUMN akey `key` TEXT;
ALTER TABLE ciphers CHANGE COLUMN atype type INTEGER NOT NULL;
ALTER TABLE devices CHANGE COLUMN atype type INTEGER NOT NULL;
ALTER TABLE twofactor CHANGE COLUMN atype type INTEGER NOT NULL;
ALTER TABLE users CHANGE COLUMN akey `key` TEXT;
ALTER TABLE users_organizations CHANGE COLUMN akey `key` TEXT;
ALTER TABLE users_organizations CHANGE COLUMN atype type INTEGER NOT NULL;


@@ -0,0 +1,7 @@
ALTER TABLE attachments CHANGE COLUMN `key` akey TEXT;
ALTER TABLE ciphers CHANGE COLUMN type atype INTEGER NOT NULL;
ALTER TABLE devices CHANGE COLUMN type atype INTEGER NOT NULL;
ALTER TABLE twofactor CHANGE COLUMN type atype INTEGER NOT NULL;
ALTER TABLE users CHANGE COLUMN `key` akey TEXT;
ALTER TABLE users_organizations CHANGE COLUMN `key` akey TEXT;
ALTER TABLE users_organizations CHANGE COLUMN type atype INTEGER NOT NULL;


@@ -0,0 +1,13 @@
DROP TABLE devices;
DROP TABLE attachments;
DROP TABLE users_collections;
DROP TABLE users_organizations;
DROP TABLE folders_ciphers;
DROP TABLE ciphers_collections;
DROP TABLE twofactor;
DROP TABLE invitations;
DROP TABLE collections;
DROP TABLE folders;
DROP TABLE ciphers;
DROP TABLE users;
DROP TABLE organizations;


@@ -0,0 +1,121 @@
CREATE TABLE users (
uuid CHAR(36) NOT NULL PRIMARY KEY,
created_at TIMESTAMP NOT NULL,
updated_at TIMESTAMP NOT NULL,
email VARCHAR(255) NOT NULL UNIQUE,
name TEXT NOT NULL,
password_hash BYTEA NOT NULL,
salt BYTEA NOT NULL,
password_iterations INTEGER NOT NULL,
password_hint TEXT,
akey TEXT NOT NULL,
private_key TEXT,
public_key TEXT,
totp_secret TEXT,
totp_recover TEXT,
security_stamp TEXT NOT NULL,
equivalent_domains TEXT NOT NULL,
excluded_globals TEXT NOT NULL,
client_kdf_type INTEGER NOT NULL DEFAULT 0,
client_kdf_iter INTEGER NOT NULL DEFAULT 100000
);
CREATE TABLE devices (
uuid CHAR(36) NOT NULL PRIMARY KEY,
created_at TIMESTAMP NOT NULL,
updated_at TIMESTAMP NOT NULL,
user_uuid CHAR(36) NOT NULL REFERENCES users (uuid),
name TEXT NOT NULL,
atype INTEGER NOT NULL,
push_token TEXT,
refresh_token TEXT NOT NULL,
twofactor_remember TEXT
);
CREATE TABLE organizations (
uuid VARCHAR(40) NOT NULL PRIMARY KEY,
name TEXT NOT NULL,
billing_email TEXT NOT NULL
);
CREATE TABLE ciphers (
uuid CHAR(36) NOT NULL PRIMARY KEY,
created_at TIMESTAMP NOT NULL,
updated_at TIMESTAMP NOT NULL,
user_uuid CHAR(36) REFERENCES users (uuid),
organization_uuid CHAR(36) REFERENCES organizations (uuid),
atype INTEGER NOT NULL,
name TEXT NOT NULL,
notes TEXT,
fields TEXT,
data TEXT NOT NULL,
favorite BOOLEAN NOT NULL,
password_history TEXT
);
CREATE TABLE attachments (
id CHAR(36) NOT NULL PRIMARY KEY,
cipher_uuid CHAR(36) NOT NULL REFERENCES ciphers (uuid),
file_name TEXT NOT NULL,
file_size INTEGER NOT NULL,
akey TEXT
);
CREATE TABLE folders (
uuid CHAR(36) NOT NULL PRIMARY KEY,
created_at TIMESTAMP NOT NULL,
updated_at TIMESTAMP NOT NULL,
user_uuid CHAR(36) NOT NULL REFERENCES users (uuid),
name TEXT NOT NULL
);
CREATE TABLE collections (
uuid VARCHAR(40) NOT NULL PRIMARY KEY,
org_uuid VARCHAR(40) NOT NULL REFERENCES organizations (uuid),
name TEXT NOT NULL
);
CREATE TABLE users_collections (
user_uuid CHAR(36) NOT NULL REFERENCES users (uuid),
collection_uuid CHAR(36) NOT NULL REFERENCES collections (uuid),
read_only BOOLEAN NOT NULL DEFAULT false,
PRIMARY KEY (user_uuid, collection_uuid)
);
CREATE TABLE users_organizations (
uuid CHAR(36) NOT NULL PRIMARY KEY,
user_uuid CHAR(36) NOT NULL REFERENCES users (uuid),
org_uuid CHAR(36) NOT NULL REFERENCES organizations (uuid),
access_all BOOLEAN NOT NULL,
akey TEXT NOT NULL,
status INTEGER NOT NULL,
atype INTEGER NOT NULL,
UNIQUE (user_uuid, org_uuid)
);
CREATE TABLE folders_ciphers (
cipher_uuid CHAR(36) NOT NULL REFERENCES ciphers (uuid),
folder_uuid CHAR(36) NOT NULL REFERENCES folders (uuid),
PRIMARY KEY (cipher_uuid, folder_uuid)
);
CREATE TABLE ciphers_collections (
cipher_uuid CHAR(36) NOT NULL REFERENCES ciphers (uuid),
collection_uuid CHAR(36) NOT NULL REFERENCES collections (uuid),
PRIMARY KEY (cipher_uuid, collection_uuid)
);
CREATE TABLE twofactor (
uuid CHAR(36) NOT NULL PRIMARY KEY,
user_uuid CHAR(36) NOT NULL REFERENCES users (uuid),
atype INTEGER NOT NULL,
enabled BOOLEAN NOT NULL,
data TEXT NOT NULL,
UNIQUE (user_uuid, atype)
);
CREATE TABLE invitations (
email VARCHAR(255) NOT NULL PRIMARY KEY
);


@@ -0,0 +1,26 @@
ALTER TABLE attachments ALTER COLUMN id TYPE CHAR(36);
ALTER TABLE attachments ALTER COLUMN cipher_uuid TYPE CHAR(36);
ALTER TABLE users ALTER COLUMN uuid TYPE CHAR(36);
ALTER TABLE users ALTER COLUMN email TYPE VARCHAR(255);
ALTER TABLE devices ALTER COLUMN uuid TYPE CHAR(36);
ALTER TABLE devices ALTER COLUMN user_uuid TYPE CHAR(36);
ALTER TABLE organizations ALTER COLUMN uuid TYPE CHAR(40);
ALTER TABLE ciphers ALTER COLUMN uuid TYPE CHAR(36);
ALTER TABLE ciphers ALTER COLUMN user_uuid TYPE CHAR(36);
ALTER TABLE ciphers ALTER COLUMN organization_uuid TYPE CHAR(36);
ALTER TABLE folders ALTER COLUMN uuid TYPE CHAR(36);
ALTER TABLE folders ALTER COLUMN user_uuid TYPE CHAR(36);
ALTER TABLE collections ALTER COLUMN uuid TYPE CHAR(40);
ALTER TABLE collections ALTER COLUMN org_uuid TYPE CHAR(40);
ALTER TABLE users_collections ALTER COLUMN user_uuid TYPE CHAR(36);
ALTER TABLE users_collections ALTER COLUMN collection_uuid TYPE CHAR(36);
ALTER TABLE users_organizations ALTER COLUMN uuid TYPE CHAR(36);
ALTER TABLE users_organizations ALTER COLUMN user_uuid TYPE CHAR(36);
ALTER TABLE users_organizations ALTER COLUMN org_uuid TYPE CHAR(36);
ALTER TABLE folders_ciphers ALTER COLUMN cipher_uuid TYPE CHAR(36);
ALTER TABLE folders_ciphers ALTER COLUMN folder_uuid TYPE CHAR(36);
ALTER TABLE ciphers_collections ALTER COLUMN cipher_uuid TYPE CHAR(36);
ALTER TABLE ciphers_collections ALTER COLUMN collection_uuid TYPE CHAR(36);
ALTER TABLE twofactor ALTER COLUMN uuid TYPE CHAR(36);
ALTER TABLE twofactor ALTER COLUMN user_uuid TYPE CHAR(36);
ALTER TABLE invitations ALTER COLUMN email TYPE VARCHAR(255);


@@ -0,0 +1,27 @@
-- Switch from CHAR() types to VARCHAR() types to avoid padding issues.
ALTER TABLE attachments ALTER COLUMN id TYPE TEXT;
ALTER TABLE attachments ALTER COLUMN cipher_uuid TYPE VARCHAR(40);
ALTER TABLE users ALTER COLUMN uuid TYPE VARCHAR(40);
ALTER TABLE users ALTER COLUMN email TYPE TEXT;
ALTER TABLE devices ALTER COLUMN uuid TYPE VARCHAR(40);
ALTER TABLE devices ALTER COLUMN user_uuid TYPE VARCHAR(40);
ALTER TABLE organizations ALTER COLUMN uuid TYPE VARCHAR(40);
ALTER TABLE ciphers ALTER COLUMN uuid TYPE VARCHAR(40);
ALTER TABLE ciphers ALTER COLUMN user_uuid TYPE VARCHAR(40);
ALTER TABLE ciphers ALTER COLUMN organization_uuid TYPE VARCHAR(40);
ALTER TABLE folders ALTER COLUMN uuid TYPE VARCHAR(40);
ALTER TABLE folders ALTER COLUMN user_uuid TYPE VARCHAR(40);
ALTER TABLE collections ALTER COLUMN uuid TYPE VARCHAR(40);
ALTER TABLE collections ALTER COLUMN org_uuid TYPE VARCHAR(40);
ALTER TABLE users_collections ALTER COLUMN user_uuid TYPE VARCHAR(40);
ALTER TABLE users_collections ALTER COLUMN collection_uuid TYPE VARCHAR(40);
ALTER TABLE users_organizations ALTER COLUMN uuid TYPE VARCHAR(40);
ALTER TABLE users_organizations ALTER COLUMN user_uuid TYPE VARCHAR(40);
ALTER TABLE users_organizations ALTER COLUMN org_uuid TYPE VARCHAR(40);
ALTER TABLE folders_ciphers ALTER COLUMN cipher_uuid TYPE VARCHAR(40);
ALTER TABLE folders_ciphers ALTER COLUMN folder_uuid TYPE VARCHAR(40);
ALTER TABLE ciphers_collections ALTER COLUMN cipher_uuid TYPE VARCHAR(40);
ALTER TABLE ciphers_collections ALTER COLUMN collection_uuid TYPE VARCHAR(40);
ALTER TABLE twofactor ALTER COLUMN uuid TYPE VARCHAR(40);
ALTER TABLE twofactor ALTER COLUMN user_uuid TYPE VARCHAR(40);
ALTER TABLE invitations ALTER COLUMN email TYPE TEXT;
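
The up.sql/down.sql pairs above and below are Diesel migrations, which the server applies automatically at startup. For manual testing they can also be driven with the Diesel CLI; a rough sketch, assuming diesel_cli is installed and using a placeholder connection URL:

# Apply any pending migrations against a PostgreSQL database
diesel migration run --database-url postgres://user:pass@localhost/bitwarden
# Re-apply the latest migration to exercise its down.sql
diesel migration redo --database-url postgres://user:pass@localhost/bitwarden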


@@ -0,0 +1,9 @@
DROP TABLE users;
DROP TABLE devices;
DROP TABLE ciphers;
DROP TABLE attachments;
DROP TABLE folders;


@@ -0,0 +1,8 @@
DROP TABLE collections;
DROP TABLE organizations;
DROP TABLE users_collections;
DROP TABLE users_organizations;


@@ -0,0 +1 @@
DROP TABLE ciphers_collections;


@@ -0,0 +1 @@
-- This file should undo anything in `up.sql`


@@ -0,0 +1,3 @@
ALTER TABLE devices
ADD COLUMN
twofactor_remember TEXT;


@@ -0,0 +1,8 @@
UPDATE users
SET totp_secret = (
SELECT twofactor.data FROM twofactor
WHERE twofactor.type = 0
AND twofactor.user_uuid = users.uuid
);
DROP TABLE twofactor;


@@ -0,0 +1,3 @@
ALTER TABLE ciphers
ADD COLUMN
password_history TEXT;


@@ -0,0 +1 @@
DROP TABLE invitations;


@@ -0,0 +1,7 @@
ALTER TABLE users
ADD COLUMN
client_kdf_type INTEGER NOT NULL DEFAULT 0; -- PBKDF2
ALTER TABLE users
ADD COLUMN
client_kdf_iter INTEGER NOT NULL DEFAULT 100000;


@@ -0,0 +1,7 @@
ALTER TABLE attachments RENAME COLUMN akey TO key;
ALTER TABLE ciphers RENAME COLUMN atype TO type;
ALTER TABLE devices RENAME COLUMN atype TO type;
ALTER TABLE twofactor RENAME COLUMN atype TO type;
ALTER TABLE users RENAME COLUMN akey TO key;
ALTER TABLE users_organizations RENAME COLUMN akey TO key;
ALTER TABLE users_organizations RENAME COLUMN atype TO type;


@@ -0,0 +1,7 @@
ALTER TABLE attachments RENAME COLUMN key TO akey;
ALTER TABLE ciphers RENAME COLUMN type TO atype;
ALTER TABLE devices RENAME COLUMN type TO atype;
ALTER TABLE twofactor RENAME COLUMN type TO atype;
ALTER TABLE users RENAME COLUMN key TO akey;
ALTER TABLE users_organizations RENAME COLUMN key TO akey;
ALTER TABLE users_organizations RENAME COLUMN type TO atype;


@@ -1 +1 @@
-nightly-2019-01-26
+nightly-2019-08-27


@@ -1,4 +1,5 @@
 use serde_json::Value;
+use std::process::Command;
 use rocket::http::{Cookie, Cookies, SameSite};
 use rocket::request::{self, FlashMessage, Form, FromRequest, Request};
@@ -6,41 +7,55 @@ use rocket::response::{content::Html, Flash, Redirect};
 use rocket::{Outcome, Route};
 use rocket_contrib::json::Json;
-use crate::api::{ApiResult, EmptyResult};
+use crate::api::{ApiResult, EmptyResult, JsonResult};
 use crate::auth::{decode_admin, encode_jwt, generate_admin_claims, ClientIp};
 use crate::config::ConfigBuilder;
-use crate::db::{models::*, DbConn};
+use crate::db::{backup_database, models::*, DbConn};
 use crate::error::Error;
 use crate::mail;
 use crate::CONFIG;
 pub fn routes() -> Vec<Route> {
-    if CONFIG.admin_token().is_none() {
-        return Vec::new();
+    if CONFIG.admin_token().is_none() && !CONFIG.disable_admin_token() {
+        return routes![admin_disabled];
     }
     routes![
         admin_login,
+        get_users,
         post_admin_login,
         admin_page,
        invite_user,
         delete_user,
         deauth_user,
+        remove_2fa,
+        update_revision_users,
         post_config,
         delete_config,
+        backup_db,
     ]
 }
+lazy_static! {
+    static ref CAN_BACKUP: bool = cfg!(feature = "sqlite") && Command::new("sqlite3").arg("-version").status().is_ok();
+}
+#[get("/")]
+fn admin_disabled() -> &'static str {
+    "The admin panel is disabled, please configure the 'ADMIN_TOKEN' variable to enable it"
+}
 const COOKIE_NAME: &str = "BWRS_ADMIN";
 const ADMIN_PATH: &str = "/admin";
 const BASE_TEMPLATE: &str = "admin/base";
+const VERSION: Option<&str> = option_env!("GIT_VERSION");
 #[get("/", rank = 2)]
 fn admin_login(flash: Option<FlashMessage>) -> ApiResult<Html<String>> {
     // If there is an error, show it
     let msg = flash.map(|msg| format!("{}: {}", msg.name(), msg.msg()));
-    let json = json!({"page_content": "admin/login", "error": msg});
+    let json = json!({"page_content": "admin/login", "version": VERSION, "error": msg});
     // Return the page
     let text = CONFIG.render_template(BASE_TEMPLATE, &json)?;
@@ -83,23 +98,27 @@ fn post_admin_login(data: Form<LoginForm>, mut cookies: Cookies, ip: ClientIp) -
 fn _validate_token(token: &str) -> bool {
     match CONFIG.admin_token().as_ref() {
         None => false,
-        Some(t) => t == token,
+        Some(t) => crate::crypto::ct_eq(t.trim(), token.trim()),
     }
 }
 #[derive(Serialize)]
 struct AdminTemplateData {
-    users: Vec<Value>,
     page_content: String,
+    version: Option<&'static str>,
+    users: Vec<Value>,
     config: Value,
+    can_backup: bool,
 }
 impl AdminTemplateData {
     fn new(users: Vec<Value>) -> Self {
         Self {
-            users,
             page_content: String::from("admin/page"),
+            version: VERSION,
+            users,
             config: CONFIG.prepare_json(),
+            can_backup: *CAN_BACKUP,
         }
     }
@@ -135,17 +154,26 @@ fn invite_user(data: Json<InviteData>, _token: AdminToken, conn: DbConn) -> Empt
         err!("Invitations are not allowed")
     }
-    let mut user = User::new(email);
-    user.save(&conn)?;
     if CONFIG.mail_enabled() {
+        let mut user = User::new(email);
+        user.save(&conn)?;
         let org_name = "bitwarden_rs";
         mail::send_invite(&user.email, &user.uuid, None, None, &org_name, None)
     } else {
-        let mut invitation = Invitation::new(data.email);
+        let invitation = Invitation::new(data.email);
         invitation.save(&conn)
     }
 }
+#[get("/users")]
+fn get_users(_token: AdminToken, conn: DbConn) -> JsonResult {
+    let users = User::get_all(&conn);
+    let users_json: Vec<Value> = users.iter().map(|u| u.to_json(&conn)).collect();
+    Ok(Json(Value::Array(users_json)))
+}
 #[post("/users/<uuid>/delete")]
 fn delete_user(uuid: String, _token: AdminToken, conn: DbConn) -> EmptyResult {
     let user = match User::find_by_uuid(&uuid, &conn) {
@@ -163,11 +191,29 @@ fn deauth_user(uuid: String, _token: AdminToken, conn: DbConn) -> EmptyResult {
         None => err!("User doesn't exist"),
     };
+    Device::delete_all_by_user(&user.uuid, &conn)?;
     user.reset_security_stamp();
     user.save(&conn)
 }
+#[post("/users/<uuid>/remove-2fa")]
+fn remove_2fa(uuid: String, _token: AdminToken, conn: DbConn) -> EmptyResult {
+    let mut user = match User::find_by_uuid(&uuid, &conn) {
+        Some(user) => user,
+        None => err!("User doesn't exist"),
+    };
+    TwoFactor::delete_all_by_user(&user.uuid, &conn)?;
+    user.totp_recover = None;
+    user.save(&conn)
+}
+#[post("/users/update_revision")]
+fn update_revision_users(_token: AdminToken, conn: DbConn) -> EmptyResult {
+    User::update_all_revisions(&conn)
+}
 #[post("/config", data = "<data>")]
 fn post_config(data: Json<ConfigBuilder>, _token: AdminToken) -> EmptyResult {
     let data: ConfigBuilder = data.into_inner();
@@ -179,31 +225,44 @@ fn delete_config(_token: AdminToken) -> EmptyResult {
     CONFIG.delete_user_config()
 }
+#[post("/config/backup_db")]
+fn backup_db(_token: AdminToken) -> EmptyResult {
+    if *CAN_BACKUP {
+        backup_database()
+    } else {
+        err!("Can't back up current DB (either it's not SQLite or the 'sqlite' binary is not present)");
+    }
+}
 pub struct AdminToken {}
 impl<'a, 'r> FromRequest<'a, 'r> for AdminToken {
     type Error = &'static str;
     fn from_request(request: &'a Request<'r>) -> request::Outcome<Self, Self::Error> {
-        let mut cookies = request.cookies();
+        if CONFIG.disable_admin_token() {
+            Outcome::Success(AdminToken {})
+        } else {
+            let mut cookies = request.cookies();
             let access_token = match cookies.get(COOKIE_NAME) {
                 Some(cookie) => cookie.value(),
                 None => return Outcome::Forward(()), // If there is no cookie, redirect to login
             };
             let ip = match request.guard::<ClientIp>() {
                 Outcome::Success(ip) => ip.ip,
                 _ => err_handler!("Error getting Client IP"),
             };
             if decode_admin(access_token).is_err() {
                 // Remove admin cookie
                 cookies.remove(Cookie::named(COOKIE_NAME));
                 error!("Invalid or expired admin JWT. IP: {}.", ip);
                 return Outcome::Forward(());
             }
+            Outcome::Success(AdminToken {})
         }
-        Outcome::Success(AdminToken {})
     }
 }

View File

@@ -106,7 +106,7 @@ fn register(data: JsonUpcase<RegisterData>, conn: DbConn) -> EmptyResult {
     }
     user.set_password(&data.MasterPasswordHash);
-    user.key = data.Key;
+    user.akey = data.Key;
     // Add extra fields if present
     if let Some(name) = data.Name {
@@ -204,7 +204,7 @@ fn post_password(data: JsonUpcase<ChangePassData>, headers: Headers, conn: DbCon
     }
     user.set_password(&data.NewMasterPasswordHash);
-    user.key = data.Key;
+    user.akey = data.Key;
     user.save(&conn)
 }
@@ -231,7 +231,7 @@ fn post_kdf(data: JsonUpcase<ChangeKdfData>, headers: Headers, conn: DbConn) ->
     user.client_kdf_iter = data.KdfIterations;
     user.client_kdf_type = data.Kdf;
     user.set_password(&data.NewMasterPasswordHash);
-    user.key = data.Key;
+    user.akey = data.Key;
     user.save(&conn)
 }
@@ -306,7 +306,7 @@ fn post_rotatekey(data: JsonUpcase<KeyData>, headers: Headers, conn: DbConn, nt:
     // Update user data
     let mut user = headers.user;
-    user.key = data.Key;
+    user.akey = data.Key;
     user.private_key = Some(data.PrivateKey);
     user.reset_security_stamp();
@@ -322,6 +322,7 @@ fn post_sstamp(data: JsonUpcase<PasswordData>, headers: Headers, conn: DbConn) -
         err!("Invalid password")
     }
+    Device::delete_all_by_user(&user.uuid, &conn)?;
     user.reset_security_stamp();
     user.save(&conn)
 }
@@ -376,7 +377,7 @@ fn post_email(data: JsonUpcase<ChangeEmailData>, headers: Headers, conn: DbConn)
     user.email = data.NewEmail;
     user.set_password(&data.NewMasterPasswordHash);
-    user.key = data.Key;
+    user.akey = data.Key;
     user.save(&conn)
 }


@@ -74,10 +74,10 @@ fn sync(data: Form<SyncData>, headers: Headers, conn: DbConn) -> JsonResult {
     let user_json = headers.user.to_json(&conn);
     let folders = Folder::find_by_user(&headers.user.uuid, &conn);
-    let folders_json: Vec<Value> = folders.iter().map(|c| c.to_json()).collect();
+    let folders_json: Vec<Value> = folders.iter().map(Folder::to_json).collect();
     let collections = Collection::find_by_user_uuid(&headers.user.uuid, &conn);
-    let collections_json: Vec<Value> = collections.iter().map(|c| c.to_json()).collect();
+    let collections_json: Vec<Value> = collections.iter().map(Collection::to_json).collect();
     let ciphers = Cipher::find_by_user(&headers.user.uuid, &conn);
     let ciphers_json: Vec<Value> = ciphers
@@ -267,7 +267,7 @@ pub fn update_cipher_from_data(
         err!("Attachment is not owned by the cipher")
     }
-    saved_att.key = Some(attachment.Key);
+    saved_att.akey = Some(attachment.Key);
     saved_att.file_name = attachment.FileName;
     saved_att.save(&conn)?;
@@ -608,7 +608,7 @@ fn share_cipher_by_uuid(
     None => err!("Invalid collection ID provided"),
     Some(collection) => {
         if collection.is_writable_by_user(&headers.user.uuid, &conn) {
-            CollectionCipher::save(&cipher.uuid.clone(), &collection.uuid, &conn)?;
+            CollectionCipher::save(&cipher.uuid, &collection.uuid, &conn)?;
             shared_to_collection = true;
         } else {
             err!("No rights to modify the collection")
@@ -691,7 +691,7 @@ fn post_attachment(
     };
     let mut attachment = Attachment::new(file_name, cipher.uuid.clone(), name, size);
-    attachment.key = attachment_key.clone();
+    attachment.akey = attachment_key.clone();
     attachment.save(&conn).expect("Error saving attachment");
     }
     _ => error!("Invalid multipart name"),
@@ -854,11 +854,7 @@ fn move_cipher_selected(data: JsonUpcase<MoveCipherData>, headers: Headers, conn
     // Move cipher
     cipher.move_to_folder(data.FolderId.clone(), &user_uuid, &conn)?;
-    nt.send_cipher_update(
-        UpdateType::CipherUpdate,
-        &cipher,
-        &[user_uuid.clone()]
-    );
+    nt.send_cipher_update(UpdateType::CipherUpdate, &cipher, &[user_uuid.clone()]);
     }
     Ok(())
@@ -874,8 +870,20 @@ fn move_cipher_selected_put(
     move_cipher_selected(data, headers, conn, nt)
 }
-#[post("/ciphers/purge", data = "<data>")]
-fn delete_all(data: JsonUpcase<PasswordData>, headers: Headers, conn: DbConn, nt: Notify) -> EmptyResult {
+#[derive(FromForm)]
+struct OrganizationId {
+    #[form(field = "organizationId")]
+    org_id: String,
+}
+#[post("/ciphers/purge?<organization..>", data = "<data>")]
+fn delete_all(
+    organization: Option<Form<OrganizationId>>,
+    data: JsonUpcase<PasswordData>,
+    headers: Headers,
+    conn: DbConn,
+    nt: Notify,
+) -> EmptyResult {
     let data: PasswordData = data.into_inner().data;
     let password_hash = data.MasterPasswordHash;
@@ -885,19 +893,40 @@ fn delete_all(data: JsonUpcase<PasswordData>, headers: Headers, conn: DbConn, nt
         err!("Invalid password")
     }
-    // Delete ciphers and their attachments
-    for cipher in Cipher::find_owned_by_user(&user.uuid, &conn) {
-        cipher.delete(&conn)?;
-    }
+    match organization {
+        Some(org_data) => {
+            // Organization ID in query params, purging organization vault
+            match UserOrganization::find_by_user_and_org(&user.uuid, &org_data.org_id, &conn) {
+                None => err!("You don't have permission to purge the organization vault"),
+                Some(user_org) => {
+                    if user_org.atype == UserOrgType::Owner {
+                        Cipher::delete_all_by_organization(&org_data.org_id, &conn)?;
+                        Collection::delete_all_by_organization(&org_data.org_id, &conn)?;
+                        nt.send_user_update(UpdateType::Vault, &user);
+                        Ok(())
+                    } else {
+                        err!("You don't have permission to purge the organization vault");
+                    }
+                }
+            }
+        }
+        None => {
+            // No organization ID in query params, purging user vault
+            // Delete ciphers and their attachments
+            for cipher in Cipher::find_owned_by_user(&user.uuid, &conn) {
+                cipher.delete(&conn)?;
+            }
     // Delete folders
     for f in Folder::find_by_user(&user.uuid, &conn) {
         f.delete(&conn)?;
     }
     user.update_revision(&conn)?;
     nt.send_user_update(UpdateType::Vault, &user);
     Ok(())
+        }
+    }
 }
 fn _delete_cipher_by_uuid(uuid: &str, headers: &Headers, conn: &DbConn, nt: &Notify) -> EmptyResult {


@@ -25,7 +25,7 @@ pub fn routes() -> Vec<Route> {
 fn get_folders(headers: Headers, conn: DbConn) -> JsonResult {
     let folders = Folder::find_by_user(&headers.user.uuid, &conn);
-    let folders_json: Vec<Value> = folders.iter().map(|c| c.to_json()).collect();
+    let folders_json: Vec<Value> = folders.iter().map(Folder::to_json).collect();
     Ok(Json(json!({
         "Data": folders_json,


@@ -33,10 +33,10 @@ use rocket::Route;
 use rocket_contrib::json::Json;
 use serde_json::Value;
-use crate::db::DbConn;
 use crate::api::{EmptyResult, JsonResult, JsonUpcase};
 use crate::auth::Headers;
+use crate::db::DbConn;
+use crate::error::Error;
 #[put("/devices/identifier/<uuid>/clear-token")]
 fn clear_device_token(uuid: String) -> EmptyResult {
@@ -63,7 +63,7 @@ fn put_device_token(uuid: String, data: JsonUpcase<Value>, headers: Headers) ->
     Ok(Json(json!({
         "Id": headers.device.uuid,
         "Name": headers.device.name,
-        "Type": headers.device.type_,
+        "Type": headers.device.atype,
         "Identifier": headers.device.uuid,
         "CreationDate": crate::util::format_date(&headers.device.created_at),
     })))
@@ -132,17 +132,33 @@ fn put_eq_domains(data: JsonUpcase<EquivDomainData>, headers: Headers, conn: DbC
 #[get("/hibp/breach?<username>")]
 fn hibp_breach(username: String) -> JsonResult {
-    let url = format!("https://haveibeenpwned.com/api/v2/breachedaccount/{}", username);
     let user_agent = "Bitwarden_RS";
+    let url = format!(
+        "https://haveibeenpwned.com/api/v3/breachedaccount/{}?truncateResponse=false&includeUnverified=false",
+        username
+    );
     use reqwest::{header::USER_AGENT, Client};
-    let value: Value = Client::new()
-        .get(&url)
-        .header(USER_AGENT, user_agent)
-        .send()?
-        .error_for_status()?
-        .json()?;
-    Ok(Json(value))
+    if let Some(api_key) = crate::CONFIG.hibp_api_key() {
+        let res = Client::new()
+            .get(&url)
+            .header(USER_AGENT, user_agent)
+            .header("hibp-api-key", api_key)
+            .send()?;
+        // If we get a 404, return a 404, it means no breached accounts
+        if res.status() == 404 {
+            return Err(Error::empty().with_code(404));
+        }
+        let value: Value = res.error_for_status()?.json()?;
+        Ok(Json(value))
+    } else {
+        Ok(Json(json!([{
+            "title": "--- Error! ---",
+            "description": "HaveIBeenPwned API key not set! Go to https://haveibeenpwned.com/API/Key",
+            "logopath": "/bwrs_static/error-x.svg"
+        }])))
+    }
 }


@@ -76,13 +76,13 @@ struct NewCollectionData {
 fn create_organization(headers: Headers, data: JsonUpcase<OrgData>, conn: DbConn) -> JsonResult {
     let data: OrgData = data.into_inner().data;

-    let mut org = Organization::new(data.Name, data.BillingEmail);
+    let org = Organization::new(data.Name, data.BillingEmail);
     let mut user_org = UserOrganization::new(headers.user.uuid.clone(), org.uuid.clone());
-    let mut collection = Collection::new(org.uuid.clone(), data.CollectionName);
+    let collection = Collection::new(org.uuid.clone(), data.CollectionName);

-    user_org.key = data.Key;
+    user_org.akey = data.Key;
     user_org.access_all = true;
-    user_org.type_ = UserOrgType::Owner as i32;
+    user_org.atype = UserOrgType::Owner as i32;
     user_org.status = UserOrgStatus::Confirmed as i32;

     org.save(&conn)?;
@@ -127,7 +127,7 @@ fn leave_organization(org_id: String, headers: Headers, conn: DbConn) -> EmptyRe
     match UserOrganization::find_by_user_and_org(&headers.user.uuid, &org_id, &conn) {
         None => err!("User not part of organization"),
         Some(user_org) => {
-            if user_org.type_ == UserOrgType::Owner {
+            if user_org.atype == UserOrgType::Owner {
                 let num_owners =
                     UserOrganization::find_by_org_and_type(&org_id, UserOrgType::Owner as i32, &conn).len();
@@ -221,7 +221,7 @@ fn post_organization_collections(
         None => err!("Can't find organization details"),
     };

-    let mut collection = Collection::new(org.uuid.clone(), data.Name);
+    let collection = Collection::new(org.uuid.clone(), data.Name);
     collection.save(&conn)?;

     Ok(Json(collection.to_json()))
@@ -484,7 +484,7 @@ fn send_invite(org_id: String, data: JsonUpcase<InviteData>, headers: AdminHeade
     }

     if !CONFIG.mail_enabled() {
-        let mut invitation = Invitation::new(email.clone());
+        let invitation = Invitation::new(email.clone());
         invitation.save(&conn)?;
     }
@@ -505,7 +505,7 @@ fn send_invite(org_id: String, data: JsonUpcase<InviteData>, headers: AdminHeade
     let mut new_user = UserOrganization::new(user.uuid.clone(), org_id.clone());
     let access_all = data.AccessAll.unwrap_or(false);
     new_user.access_all = access_all;
-    new_user.type_ = new_type;
+    new_user.atype = new_type;
     new_user.status = user_org_status;

     // If no accessAll, add the collections received
@@ -581,7 +581,7 @@ fn reinvite_user(org_id: String, user_org: String, headers: AdminHeaders, conn:
             Some(headers.user.email),
         )?;
     } else {
-        let mut invitation = Invitation::new(user.email.clone());
+        let invitation = Invitation::new(user.email.clone());
         invitation.save(&conn)?;
     }
@@ -657,7 +657,7 @@ fn confirm_invite(
         None => err!("The specified user isn't a member of the organization"),
     };

-    if user_to_confirm.type_ != UserOrgType::User && headers.org_user_type != UserOrgType::Owner {
+    if user_to_confirm.atype != UserOrgType::User && headers.org_user_type != UserOrgType::Owner {
         err!("Only Owners can confirm Managers, Admins or Owners")
     }
@@ -666,7 +666,7 @@ fn confirm_invite(
     }

     user_to_confirm.status = UserOrgStatus::Confirmed as i32;
-    user_to_confirm.key = match data["Key"].as_str() {
+    user_to_confirm.akey = match data["Key"].as_str() {
         Some(key) => key.to_string(),
         None => err!("Invalid key provided"),
     };
@@ -735,18 +735,18 @@ fn edit_user(
         None => err!("The specified user isn't member of the organization"),
     };

-    if new_type != user_to_edit.type_
-        && (user_to_edit.type_ >= UserOrgType::Admin || new_type >= UserOrgType::Admin)
+    if new_type != user_to_edit.atype
+        && (user_to_edit.atype >= UserOrgType::Admin || new_type >= UserOrgType::Admin)
         && headers.org_user_type != UserOrgType::Owner
     {
         err!("Only Owners can grant and remove Admin or Owner privileges")
     }

-    if user_to_edit.type_ == UserOrgType::Owner && headers.org_user_type != UserOrgType::Owner {
+    if user_to_edit.atype == UserOrgType::Owner && headers.org_user_type != UserOrgType::Owner {
         err!("Only Owners can edit Owner users")
     }

-    if user_to_edit.type_ == UserOrgType::Owner && new_type != UserOrgType::Owner {
+    if user_to_edit.atype == UserOrgType::Owner && new_type != UserOrgType::Owner {
         // Removing owner permmission, check that there are at least another owner
         let num_owners = UserOrganization::find_by_org_and_type(&org_id, UserOrgType::Owner as i32, &conn).len();
@@ -756,7 +756,7 @@ fn edit_user(
     }

     user_to_edit.access_all = data.AccessAll;
-    user_to_edit.type_ = new_type as i32;
+    user_to_edit.atype = new_type as i32;

     // Delete all the odd collections
     for c in CollectionUser::find_by_organization_and_user_uuid(&org_id, &user_to_edit.user_uuid, &conn) {
@@ -785,11 +785,11 @@ fn delete_user(org_id: String, org_user_id: String, headers: AdminHeaders, conn:
         None => err!("User to delete isn't member of the organization"),
     };

-    if user_to_delete.type_ != UserOrgType::User && headers.org_user_type != UserOrgType::Owner {
+    if user_to_delete.atype != UserOrgType::User && headers.org_user_type != UserOrgType::Owner {
         err!("Only Owners can delete Admins or Owners")
     }

-    if user_to_delete.type_ == UserOrgType::Owner {
+    if user_to_delete.atype == UserOrgType::Owner {
         // Removing owner, check that there are at least another owner
         let num_owners = UserOrganization::find_by_org_and_type(&org_id, UserOrgType::Owner as i32, &conn).len();
@@ -842,7 +842,7 @@ fn post_org_import(
         None => err!("User is not part of the organization"),
     };

-    if org_user.type_ < UserOrgType::Admin {
+    if org_user.atype < UserOrgType::Admin {
         err!("Only admins or owners can import into an organization")
     }
@@ -851,7 +851,7 @@ fn post_org_import(
         .Collections
         .into_iter()
         .map(|coll| {
-            let mut collection = Collection::new(org_id.clone(), coll.Name);
+            let collection = Collection::new(org_id.clone(), coll.Name);
             if collection.save(&conn).is_err() {
                 err!("Failed to create Collection");
             }
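Most hunks in this file are the mechanical type_ → atype and key → akey renames (avoiding the trailing-underscore workaround for the `type` keyword), plus dropped `mut` bindings, which suggests save() now takes the model by shared reference. The repeated owner checks all enforce a single invariant, restated here as a sketch (the helper name is hypothetical):

// An organization must always retain at least one Owner, so demoting or
// removing an Owner is only allowed while another Owner exists.
fn can_remove_owner(num_owners: usize) -> bool {
    num_owners > 1
}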

View File

@@ -1,657 +0,0 @@
use data_encoding::BASE32;
use rocket_contrib::json::Json;
use serde_json;
use serde_json::Value;
use crate::api::{ApiResult, EmptyResult, JsonResult, JsonUpcase, NumberOrString, PasswordData};
use crate::auth::Headers;
use crate::crypto;
use crate::db::{
models::{TwoFactor, TwoFactorType, User},
DbConn,
};
use crate::error::{Error, MapResult};
use rocket::Route;
pub fn routes() -> Vec<Route> {
routes![
get_twofactor,
get_recover,
recover,
disable_twofactor,
disable_twofactor_put,
generate_authenticator,
activate_authenticator,
activate_authenticator_put,
generate_u2f,
generate_u2f_challenge,
activate_u2f,
activate_u2f_put,
generate_yubikey,
activate_yubikey,
activate_yubikey_put,
]
}
#[get("/two-factor")]
fn get_twofactor(headers: Headers, conn: DbConn) -> JsonResult {
let twofactors = TwoFactor::find_by_user(&headers.user.uuid, &conn);
let twofactors_json: Vec<Value> = twofactors.iter().map(|c| c.to_json_list()).collect();
Ok(Json(json!({
"Data": twofactors_json,
"Object": "list",
"ContinuationToken": null,
})))
}
#[post("/two-factor/get-recover", data = "<data>")]
fn get_recover(data: JsonUpcase<PasswordData>, headers: Headers) -> JsonResult {
let data: PasswordData = data.into_inner().data;
let user = headers.user;
if !user.check_valid_password(&data.MasterPasswordHash) {
err!("Invalid password");
}
Ok(Json(json!({
"Code": user.totp_recover,
"Object": "twoFactorRecover"
})))
}
#[derive(Deserialize)]
#[allow(non_snake_case)]
struct RecoverTwoFactor {
MasterPasswordHash: String,
Email: String,
RecoveryCode: String,
}
#[post("/two-factor/recover", data = "<data>")]
fn recover(data: JsonUpcase<RecoverTwoFactor>, conn: DbConn) -> JsonResult {
let data: RecoverTwoFactor = data.into_inner().data;
use crate::db::models::User;
// Get the user
let mut user = match User::find_by_mail(&data.Email, &conn) {
Some(user) => user,
None => err!("Username or password is incorrect. Try again."),
};
// Check password
if !user.check_valid_password(&data.MasterPasswordHash) {
err!("Username or password is incorrect. Try again.")
}
// Check if recovery code is correct
if !user.check_valid_recovery_code(&data.RecoveryCode) {
err!("Recovery code is incorrect. Try again.")
}
// Remove all twofactors from the user
for twofactor in TwoFactor::find_by_user(&user.uuid, &conn) {
twofactor.delete(&conn)?;
}
// Remove the recovery code, not needed without twofactors
user.totp_recover = None;
user.save(&conn)?;
Ok(Json(json!({})))
}
#[derive(Deserialize)]
#[allow(non_snake_case)]
struct DisableTwoFactorData {
MasterPasswordHash: String,
Type: NumberOrString,
}
#[post("/two-factor/disable", data = "<data>")]
fn disable_twofactor(data: JsonUpcase<DisableTwoFactorData>, headers: Headers, conn: DbConn) -> JsonResult {
let data: DisableTwoFactorData = data.into_inner().data;
let password_hash = data.MasterPasswordHash;
let user = headers.user;
if !user.check_valid_password(&password_hash) {
err!("Invalid password");
}
let type_ = data.Type.into_i32().expect("Invalid type");
if let Some(twofactor) = TwoFactor::find_by_user_and_type(&user.uuid, type_, &conn) {
twofactor.delete(&conn)?;
}
Ok(Json(json!({
"Enabled": false,
"Type": type_,
"Object": "twoFactorProvider"
})))
}
#[put("/two-factor/disable", data = "<data>")]
fn disable_twofactor_put(data: JsonUpcase<DisableTwoFactorData>, headers: Headers, conn: DbConn) -> JsonResult {
disable_twofactor(data, headers, conn)
}
#[post("/two-factor/get-authenticator", data = "<data>")]
fn generate_authenticator(data: JsonUpcase<PasswordData>, headers: Headers, conn: DbConn) -> JsonResult {
let data: PasswordData = data.into_inner().data;
let user = headers.user;
if !user.check_valid_password(&data.MasterPasswordHash) {
err!("Invalid password");
}
let type_ = TwoFactorType::Authenticator as i32;
let twofactor = TwoFactor::find_by_user_and_type(&user.uuid, type_, &conn);
let (enabled, key) = match twofactor {
Some(tf) => (true, tf.data),
_ => (false, BASE32.encode(&crypto::get_random(vec![0u8; 20]))),
};
Ok(Json(json!({
"Enabled": enabled,
"Key": key,
"Object": "twoFactorAuthenticator"
})))
}
#[derive(Deserialize, Debug)]
#[allow(non_snake_case)]
struct EnableAuthenticatorData {
MasterPasswordHash: String,
Key: String,
Token: NumberOrString,
}
#[post("/two-factor/authenticator", data = "<data>")]
fn activate_authenticator(data: JsonUpcase<EnableAuthenticatorData>, headers: Headers, conn: DbConn) -> JsonResult {
let data: EnableAuthenticatorData = data.into_inner().data;
let password_hash = data.MasterPasswordHash;
let key = data.Key;
let token = match data.Token.into_i32() {
Some(n) => n as u64,
None => err!("Malformed token"),
};
let mut user = headers.user;
if !user.check_valid_password(&password_hash) {
err!("Invalid password");
}
// Validate key as base32 and 20 bytes length
let decoded_key: Vec<u8> = match BASE32.decode(key.as_bytes()) {
Ok(decoded) => decoded,
_ => err!("Invalid totp secret"),
};
if decoded_key.len() != 20 {
err!("Invalid key length")
}
let type_ = TwoFactorType::Authenticator;
let twofactor = TwoFactor::new(user.uuid.clone(), type_, key.to_uppercase());
// Validate the token provided with the key
if !twofactor.check_totp_code(token) {
err!("Invalid totp code")
}
_generate_recover_code(&mut user, &conn);
twofactor.save(&conn)?;
Ok(Json(json!({
"Enabled": true,
"Key": key,
"Object": "twoFactorAuthenticator"
})))
}
#[put("/two-factor/authenticator", data = "<data>")]
fn activate_authenticator_put(data: JsonUpcase<EnableAuthenticatorData>, headers: Headers, conn: DbConn) -> JsonResult {
activate_authenticator(data, headers, conn)
}
fn _generate_recover_code(user: &mut User, conn: &DbConn) {
if user.totp_recover.is_none() {
let totp_recover = BASE32.encode(&crypto::get_random(vec![0u8; 20]));
user.totp_recover = Some(totp_recover);
user.save(conn).ok();
}
}
use u2f::messages::{RegisterResponse, SignResponse, U2fSignRequest};
use u2f::protocol::{Challenge, U2f};
use u2f::register::Registration;
use crate::CONFIG;
const U2F_VERSION: &str = "U2F_V2";
lazy_static! {
static ref APP_ID: String = format!("{}/app-id.json", &CONFIG.domain());
static ref U2F: U2f = U2f::new(APP_ID.clone());
}
#[post("/two-factor/get-u2f", data = "<data>")]
fn generate_u2f(data: JsonUpcase<PasswordData>, headers: Headers, conn: DbConn) -> JsonResult {
if !CONFIG.domain_set() {
err!("`DOMAIN` environment variable is not set. U2F disabled")
}
let data: PasswordData = data.into_inner().data;
let user = headers.user;
if !user.check_valid_password(&data.MasterPasswordHash) {
err!("Invalid password");
}
let u2f_type = TwoFactorType::U2f as i32;
let enabled = TwoFactor::find_by_user_and_type(&user.uuid, u2f_type, &conn).is_some();
Ok(Json(json!({
"Enabled": enabled,
"Object": "twoFactorU2f"
})))
}
#[post("/two-factor/get-u2f-challenge", data = "<data>")]
fn generate_u2f_challenge(data: JsonUpcase<PasswordData>, headers: Headers, conn: DbConn) -> JsonResult {
let data: PasswordData = data.into_inner().data;
let user = headers.user;
if !user.check_valid_password(&data.MasterPasswordHash) {
err!("Invalid password");
}
let user_uuid = &user.uuid;
let challenge = _create_u2f_challenge(user_uuid, TwoFactorType::U2fRegisterChallenge, &conn).challenge;
Ok(Json(json!({
"UserId": user.uuid,
"AppId": APP_ID.to_string(),
"Challenge": challenge,
"Version": U2F_VERSION,
})))
}
#[derive(Deserialize, Debug)]
#[allow(non_snake_case)]
struct EnableU2FData {
Id: NumberOrString, // 1..5
Name: String,
MasterPasswordHash: String,
DeviceResponse: String,
}
// This struct is copied from the U2F lib
// to add an optional error code
#[derive(Deserialize)]
#[serde(rename_all = "camelCase")]
struct RegisterResponseCopy {
pub registration_data: String,
pub version: String,
pub client_data: String,
pub error_code: Option<NumberOrString>,
}
impl RegisterResponseCopy {
fn into_response(self) -> RegisterResponse {
RegisterResponse {
registration_data: self.registration_data,
version: self.version,
client_data: self.client_data,
}
}
}
#[post("/two-factor/u2f", data = "<data>")]
fn activate_u2f(data: JsonUpcase<EnableU2FData>, headers: Headers, conn: DbConn) -> JsonResult {
let data: EnableU2FData = data.into_inner().data;
let mut user = headers.user;
if !user.check_valid_password(&data.MasterPasswordHash) {
err!("Invalid password");
}
let tf_type = TwoFactorType::U2fRegisterChallenge as i32;
let tf_challenge = match TwoFactor::find_by_user_and_type(&user.uuid, tf_type, &conn) {
Some(c) => c,
None => err!("Can't recover challenge"),
};
let challenge: Challenge = serde_json::from_str(&tf_challenge.data)?;
tf_challenge.delete(&conn)?;
let response_copy: RegisterResponseCopy = serde_json::from_str(&data.DeviceResponse)?;
let error_code = response_copy
.error_code
.clone()
.map_or("0".into(), NumberOrString::into_string);
if error_code != "0" {
err!("Error registering U2F token")
}
let response = response_copy.into_response();
let registration = U2F.register_response(challenge.clone(), response)?;
// TODO: Allow more than one U2F device
let mut registrations = Vec::new();
registrations.push(registration);
let tf_registration = TwoFactor::new(
user.uuid.clone(),
TwoFactorType::U2f,
serde_json::to_string(&registrations).unwrap(),
);
tf_registration.save(&conn)?;
_generate_recover_code(&mut user, &conn);
Ok(Json(json!({
"Enabled": true,
"Challenge": {
"UserId": user.uuid,
"AppId": APP_ID.to_string(),
"Challenge": challenge,
"Version": U2F_VERSION,
},
"Object": "twoFactorU2f"
})))
}
#[put("/two-factor/u2f", data = "<data>")]
fn activate_u2f_put(data: JsonUpcase<EnableU2FData>, headers: Headers, conn: DbConn) -> JsonResult {
activate_u2f(data, headers, conn)
}
fn _create_u2f_challenge(user_uuid: &str, type_: TwoFactorType, conn: &DbConn) -> Challenge {
let challenge = U2F.generate_challenge().unwrap();
TwoFactor::new(user_uuid.into(), type_, serde_json::to_string(&challenge).unwrap())
.save(conn)
.expect("Error saving challenge");
challenge
}
// This struct is copied from the U2F lib
// because it doesn't implement Deserialize
#[derive(Serialize, Deserialize, Clone)]
#[serde(rename_all = "camelCase")]
struct RegistrationCopy {
pub key_handle: Vec<u8>,
pub pub_key: Vec<u8>,
pub attestation_cert: Option<Vec<u8>>,
}
impl Into<Registration> for RegistrationCopy {
fn into(self) -> Registration {
Registration {
key_handle: self.key_handle,
pub_key: self.pub_key,
attestation_cert: self.attestation_cert,
}
}
}
fn _parse_registrations(registations: &str) -> Vec<Registration> {
let registrations_copy: Vec<RegistrationCopy> =
serde_json::from_str(registations).expect("Can't parse RegistrationCopy data");
registrations_copy.into_iter().map(Into::into).collect()
}
pub fn generate_u2f_login(user_uuid: &str, conn: &DbConn) -> ApiResult<U2fSignRequest> {
let challenge = _create_u2f_challenge(user_uuid, TwoFactorType::U2fLoginChallenge, conn);
let type_ = TwoFactorType::U2f as i32;
let twofactor = match TwoFactor::find_by_user_and_type(user_uuid, type_, conn) {
Some(tf) => tf,
None => err!("No U2F devices registered"),
};
let registrations = _parse_registrations(&twofactor.data);
let signed_request: U2fSignRequest = U2F.sign_request(challenge, registrations);
Ok(signed_request)
}
pub fn validate_u2f_login(user_uuid: &str, response: &str, conn: &DbConn) -> EmptyResult {
let challenge_type = TwoFactorType::U2fLoginChallenge as i32;
let u2f_type = TwoFactorType::U2f as i32;
let tf_challenge = TwoFactor::find_by_user_and_type(user_uuid, challenge_type, &conn);
let challenge = match tf_challenge {
Some(tf_challenge) => {
let challenge: Challenge = serde_json::from_str(&tf_challenge.data)?;
tf_challenge.delete(&conn)?;
challenge
}
None => err!("Can't recover login challenge"),
};
let twofactor = match TwoFactor::find_by_user_and_type(user_uuid, u2f_type, conn) {
Some(tf) => tf,
None => err!("No U2F devices registered"),
};
let registrations = _parse_registrations(&twofactor.data);
let response: SignResponse = serde_json::from_str(response)?;
let mut _counter: u32 = 0;
for registration in registrations {
let response = U2F.sign_response(challenge.clone(), registration, response.clone(), _counter);
match response {
Ok(new_counter) => {
_counter = new_counter;
info!("O {:#}", new_counter);
return Ok(());
}
Err(e) => {
info!("E {:#}", e);
// break;
}
}
}
err!("error verifying response")
}
#[derive(Deserialize, Debug)]
#[allow(non_snake_case)]
struct EnableYubikeyData {
MasterPasswordHash: String,
Key1: Option<String>,
Key2: Option<String>,
Key3: Option<String>,
Key4: Option<String>,
Key5: Option<String>,
Nfc: bool,
}
#[derive(Deserialize, Serialize, Debug)]
#[allow(non_snake_case)]
pub struct YubikeyMetadata {
Keys: Vec<String>,
pub Nfc: bool,
}
use yubico::config::Config;
use yubico::Yubico;
fn parse_yubikeys(data: &EnableYubikeyData) -> Vec<String> {
let data_keys = [&data.Key1, &data.Key2, &data.Key3, &data.Key4, &data.Key5];
data_keys.iter().filter_map(|e| e.as_ref().cloned()).collect()
}
fn jsonify_yubikeys(yubikeys: Vec<String>) -> serde_json::Value {
let mut result = json!({});
for (i, key) in yubikeys.into_iter().enumerate() {
result[format!("Key{}", i + 1)] = Value::String(key);
}
result
}
fn get_yubico_credentials() -> Result<(String, String), Error> {
match (CONFIG.yubico_client_id(), CONFIG.yubico_secret_key()) {
(Some(id), Some(secret)) => Ok((id, secret)),
_ => err!("`YUBICO_CLIENT_ID` or `YUBICO_SECRET_KEY` environment variable is not set. Yubikey OTP Disabled"),
}
}
fn verify_yubikey_otp(otp: String) -> EmptyResult {
let (yubico_id, yubico_secret) = get_yubico_credentials()?;
let yubico = Yubico::new();
let config = Config::default().set_client_id(yubico_id).set_key(yubico_secret);
match CONFIG.yubico_server() {
Some(server) => yubico.verify(otp, config.set_api_hosts(vec![server])),
None => yubico.verify(otp, config),
}
.map_res("Failed to verify OTP")
.and(Ok(()))
}
#[post("/two-factor/get-yubikey", data = "<data>")]
fn generate_yubikey(data: JsonUpcase<PasswordData>, headers: Headers, conn: DbConn) -> JsonResult {
// Make sure the credentials are set
get_yubico_credentials()?;
let data: PasswordData = data.into_inner().data;
let user = headers.user;
if !user.check_valid_password(&data.MasterPasswordHash) {
err!("Invalid password");
}
let user_uuid = &user.uuid;
let yubikey_type = TwoFactorType::YubiKey as i32;
let r = TwoFactor::find_by_user_and_type(user_uuid, yubikey_type, &conn);
if let Some(r) = r {
let yubikey_metadata: YubikeyMetadata = serde_json::from_str(&r.data)?;
let mut result = jsonify_yubikeys(yubikey_metadata.Keys);
result["Enabled"] = Value::Bool(true);
result["Nfc"] = Value::Bool(yubikey_metadata.Nfc);
result["Object"] = Value::String("twoFactorU2f".to_owned());
Ok(Json(result))
} else {
Ok(Json(json!({
"Enabled": false,
"Object": "twoFactorU2f",
})))
}
}
#[post("/two-factor/yubikey", data = "<data>")]
fn activate_yubikey(data: JsonUpcase<EnableYubikeyData>, headers: Headers, conn: DbConn) -> JsonResult {
let data: EnableYubikeyData = data.into_inner().data;
let mut user = headers.user;
if !user.check_valid_password(&data.MasterPasswordHash) {
err!("Invalid password");
}
// Check if we already have some data
let yubikey_data = TwoFactor::find_by_user_and_type(&user.uuid, TwoFactorType::YubiKey as i32, &conn);
if let Some(yubikey_data) = yubikey_data {
yubikey_data.delete(&conn)?;
}
let yubikeys = parse_yubikeys(&data);
if yubikeys.is_empty() {
return Ok(Json(json!({
"Enabled": false,
"Object": "twoFactorU2f",
})));
}
// Ensure they are valid OTPs
for yubikey in &yubikeys {
if yubikey.len() == 12 {
// YubiKey ID
continue;
}
verify_yubikey_otp(yubikey.to_owned()).map_res("Invalid Yubikey OTP provided")?;
}
let yubikey_ids: Vec<String> = yubikeys.into_iter().map(|x| (&x[..12]).to_owned()).collect();
let yubikey_metadata = YubikeyMetadata {
Keys: yubikey_ids,
Nfc: data.Nfc,
};
let yubikey_registration = TwoFactor::new(
user.uuid.clone(),
TwoFactorType::YubiKey,
serde_json::to_string(&yubikey_metadata).unwrap(),
);
yubikey_registration.save(&conn)?;
_generate_recover_code(&mut user, &conn);
let mut result = jsonify_yubikeys(yubikey_metadata.Keys);
result["Enabled"] = Value::Bool(true);
result["Nfc"] = Value::Bool(yubikey_metadata.Nfc);
result["Object"] = Value::String("twoFactorU2f".to_owned());
Ok(Json(result))
}
#[put("/two-factor/yubikey", data = "<data>")]
fn activate_yubikey_put(data: JsonUpcase<EnableYubikeyData>, headers: Headers, conn: DbConn) -> JsonResult {
activate_yubikey(data, headers, conn)
}
pub fn validate_yubikey_login(user_uuid: &str, response: &str, conn: &DbConn) -> EmptyResult {
if response.len() != 44 {
err!("Invalid Yubikey OTP length");
}
let yubikey_type = TwoFactorType::YubiKey as i32;
let twofactor = match TwoFactor::find_by_user_and_type(user_uuid, yubikey_type, &conn) {
Some(tf) => tf,
None => err!("No YubiKey devices registered"),
};
let yubikey_metadata: YubikeyMetadata =
serde_json::from_str(&twofactor.data).expect("Can't parse Yubikey Metadata");
let response_id = &response[..12];
if !yubikey_metadata.Keys.contains(&response_id.to_owned()) {
err!("Given Yubikey is not registered");
}
let result = verify_yubikey_otp(response.to_owned());
match result {
Ok(_answer) => Ok(()),
Err(_e) => err!("Failed to verify Yubikey against OTP server"),
}
}
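The module deleted above is split into the per-provider files that follow. One detail to keep in mind when reading the Yubikey half: validate_yubikey_login relies on the fixed OTP layout, a 44-character string whose first 12 characters are the key's public ID. A minimal sketch assuming only that layout (the helper is hypothetical):

// Mirrors the checks in validate_yubikey_login: length 44, public ID in front.
fn yubikey_public_id(otp: &str) -> Option<&str> {
    if otp.len() == 44 {
        Some(&otp[..12])
    } else {
        None
    }
}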

View File

@@ -0,0 +1,120 @@
use data_encoding::BASE32;
use rocket::Route;
use rocket_contrib::json::Json;
use crate::api::core::two_factor::_generate_recover_code;
use crate::api::{EmptyResult, JsonResult, JsonUpcase, NumberOrString, PasswordData};
use crate::auth::Headers;
use crate::crypto;
use crate::db::{
models::{TwoFactor, TwoFactorType},
DbConn,
};
pub fn routes() -> Vec<Route> {
routes![
generate_authenticator,
activate_authenticator,
activate_authenticator_put,
]
}
#[post("/two-factor/get-authenticator", data = "<data>")]
fn generate_authenticator(data: JsonUpcase<PasswordData>, headers: Headers, conn: DbConn) -> JsonResult {
let data: PasswordData = data.into_inner().data;
let user = headers.user;
if !user.check_valid_password(&data.MasterPasswordHash) {
err!("Invalid password");
}
let type_ = TwoFactorType::Authenticator as i32;
let twofactor = TwoFactor::find_by_user_and_type(&user.uuid, type_, &conn);
let (enabled, key) = match twofactor {
Some(tf) => (true, tf.data),
_ => (false, BASE32.encode(&crypto::get_random(vec![0u8; 20]))),
};
Ok(Json(json!({
"Enabled": enabled,
"Key": key,
"Object": "twoFactorAuthenticator"
})))
}
#[derive(Deserialize, Debug)]
#[allow(non_snake_case)]
struct EnableAuthenticatorData {
MasterPasswordHash: String,
Key: String,
Token: NumberOrString,
}
#[post("/two-factor/authenticator", data = "<data>")]
fn activate_authenticator(data: JsonUpcase<EnableAuthenticatorData>, headers: Headers, conn: DbConn) -> JsonResult {
let data: EnableAuthenticatorData = data.into_inner().data;
let password_hash = data.MasterPasswordHash;
let key = data.Key;
let token = data.Token.into_i32()? as u64;
let mut user = headers.user;
if !user.check_valid_password(&password_hash) {
err!("Invalid password");
}
// Validate key as base32 and 20 bytes length
let decoded_key: Vec<u8> = match BASE32.decode(key.as_bytes()) {
Ok(decoded) => decoded,
_ => err!("Invalid totp secret"),
};
if decoded_key.len() != 20 {
err!("Invalid key length")
}
let type_ = TwoFactorType::Authenticator;
let twofactor = TwoFactor::new(user.uuid.clone(), type_, key.to_uppercase());
// Validate the token provided with the key
validate_totp_code(token, &twofactor.data)?;
_generate_recover_code(&mut user, &conn);
twofactor.save(&conn)?;
Ok(Json(json!({
"Enabled": true,
"Key": key,
"Object": "twoFactorAuthenticator"
})))
}
#[put("/two-factor/authenticator", data = "<data>")]
fn activate_authenticator_put(data: JsonUpcase<EnableAuthenticatorData>, headers: Headers, conn: DbConn) -> JsonResult {
activate_authenticator(data, headers, conn)
}
pub fn validate_totp_code_str(totp_code: &str, secret: &str) -> EmptyResult {
let totp_code: u64 = match totp_code.parse() {
Ok(code) => code,
_ => err!("TOTP code is not a number"),
};
validate_totp_code(totp_code, secret)
}
pub fn validate_totp_code(totp_code: u64, secret: &str) -> EmptyResult {
use oath::{totp_raw_now, HashType};
let decoded_secret = match BASE32.decode(secret.as_bytes()) {
Ok(s) => s,
Err(_) => err!("Invalid TOTP secret"),
};
let generated = totp_raw_now(&decoded_secret, 6, 0, 30, &HashType::SHA1);
if generated != totp_code {
err!("Invalid TOTP code");
}
Ok(())
}
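validate_totp_code compares against the single RFC 6238 code for the current 30-second window. A minimal sketch of producing that code with the same oath parameters used above (6 digits, zero epoch offset, 30-second step, SHA-1); the function name is illustrative:

use data_encoding::BASE32;
use oath::{totp_raw_now, HashType};

// The code an authenticator app would currently display for this secret,
// assuming it is valid base32 (as activate_authenticator enforces).
fn current_totp(secret_b32: &str) -> Option<u64> {
    let key = BASE32.decode(secret_b32.as_bytes()).ok()?;
    Some(totp_raw_now(&key, 6, 0, 30, &HashType::SHA1))
}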

View File

@@ -0,0 +1,346 @@
use chrono::Utc;
use data_encoding::BASE64;
use rocket::Route;
use rocket_contrib::json::Json;
use serde_json;
use crate::api::{ApiResult, EmptyResult, JsonResult, JsonUpcase, PasswordData};
use crate::auth::Headers;
use crate::crypto;
use crate::db::{
models::{TwoFactor, TwoFactorType, User},
DbConn,
};
use crate::error::MapResult;
use crate::CONFIG;
pub fn routes() -> Vec<Route> {
routes![
get_duo,
activate_duo,
activate_duo_put,
]
}
#[derive(Serialize, Deserialize)]
struct DuoData {
host: String,
ik: String,
sk: String,
}
impl DuoData {
fn global() -> Option<Self> {
match (CONFIG._enable_duo(), CONFIG.duo_host()) {
(true, Some(host)) => Some(Self {
host,
ik: CONFIG.duo_ikey().unwrap(),
sk: CONFIG.duo_skey().unwrap(),
}),
_ => None,
}
}
fn msg(s: &str) -> Self {
Self {
host: s.into(),
ik: s.into(),
sk: s.into(),
}
}
fn secret() -> Self {
Self::msg("<global_secret>")
}
fn obscure(self) -> Self {
let mut host = self.host;
let mut ik = self.ik;
let mut sk = self.sk;
let digits = 4;
let replaced = "************";
host.replace_range(digits.., replaced);
ik.replace_range(digits.., replaced);
sk.replace_range(digits.., replaced);
Self { host, ik, sk }
}
}
enum DuoStatus {
Global(DuoData), // Using the global duo config
User(DuoData), // Using the user's config
Disabled(bool), // True if there is a global setting
}
impl DuoStatus {
fn data(self) -> Option<DuoData> {
match self {
DuoStatus::Global(data) => Some(data),
DuoStatus::User(data) => Some(data),
DuoStatus::Disabled(_) => None,
}
}
}
const DISABLED_MESSAGE_DEFAULT: &str = "<To use the global Duo keys, please leave these fields untouched>";
#[post("/two-factor/get-duo", data = "<data>")]
fn get_duo(data: JsonUpcase<PasswordData>, headers: Headers, conn: DbConn) -> JsonResult {
let data: PasswordData = data.into_inner().data;
if !headers.user.check_valid_password(&data.MasterPasswordHash) {
err!("Invalid password");
}
let data = get_user_duo_data(&headers.user.uuid, &conn);
let (enabled, data) = match data {
DuoStatus::Global(_) => (true, Some(DuoData::secret())),
DuoStatus::User(data) => (true, Some(data.obscure())),
DuoStatus::Disabled(true) => (false, Some(DuoData::msg(DISABLED_MESSAGE_DEFAULT))),
DuoStatus::Disabled(false) => (false, None),
};
let json = if let Some(data) = data {
json!({
"Enabled": enabled,
"Host": data.host,
"SecretKey": data.sk,
"IntegrationKey": data.ik,
"Object": "twoFactorDuo"
})
} else {
json!({
"Enabled": enabled,
"Object": "twoFactorDuo"
})
};
Ok(Json(json))
}
#[derive(Deserialize)]
#[allow(non_snake_case, dead_code)]
struct EnableDuoData {
MasterPasswordHash: String,
Host: String,
SecretKey: String,
IntegrationKey: String,
}
impl From<EnableDuoData> for DuoData {
fn from(d: EnableDuoData) -> Self {
Self {
host: d.Host,
ik: d.IntegrationKey,
sk: d.SecretKey,
}
}
}
fn check_duo_fields_custom(data: &EnableDuoData) -> bool {
fn empty_or_default(s: &str) -> bool {
let st = s.trim();
st.is_empty() || s == DISABLED_MESSAGE_DEFAULT
}
!empty_or_default(&data.Host) && !empty_or_default(&data.SecretKey) && !empty_or_default(&data.IntegrationKey)
}
#[post("/two-factor/duo", data = "<data>")]
fn activate_duo(data: JsonUpcase<EnableDuoData>, headers: Headers, conn: DbConn) -> JsonResult {
let data: EnableDuoData = data.into_inner().data;
if !headers.user.check_valid_password(&data.MasterPasswordHash) {
err!("Invalid password");
}
let (data, data_str) = if check_duo_fields_custom(&data) {
let data_req: DuoData = data.into();
let data_str = serde_json::to_string(&data_req)?;
duo_api_request("GET", "/auth/v2/check", "", &data_req).map_res("Failed to validate Duo credentials")?;
(data_req.obscure(), data_str)
} else {
(DuoData::secret(), String::new())
};
let type_ = TwoFactorType::Duo;
let twofactor = TwoFactor::new(headers.user.uuid.clone(), type_, data_str);
twofactor.save(&conn)?;
Ok(Json(json!({
"Enabled": true,
"Host": data.host,
"SecretKey": data.sk,
"IntegrationKey": data.ik,
"Object": "twoFactorDuo"
})))
}
#[put("/two-factor/duo", data = "<data>")]
fn activate_duo_put(data: JsonUpcase<EnableDuoData>, headers: Headers, conn: DbConn) -> JsonResult {
activate_duo(data, headers, conn)
}
fn duo_api_request(method: &str, path: &str, params: &str, data: &DuoData) -> EmptyResult {
const AGENT: &str = "bitwarden_rs:Duo/1.0 (Rust)";
use reqwest::{header::*, Client, Method};
use std::str::FromStr;
let url = format!("https://{}{}", &data.host, path);
let date = Utc::now().to_rfc2822();
let username = &data.ik;
let fields = [&date, method, &data.host, path, params];
let password = crypto::hmac_sign(&data.sk, &fields.join("\n"));
let m = Method::from_str(method).unwrap_or_default();
Client::new()
.request(m, &url)
.basic_auth(username, Some(password))
.header(USER_AGENT, AGENT)
.header(DATE, date)
.send()?
.error_for_status()?;
Ok(())
}
const DUO_EXPIRE: i64 = 300;
const APP_EXPIRE: i64 = 3600;
const AUTH_PREFIX: &str = "AUTH";
const DUO_PREFIX: &str = "TX";
const APP_PREFIX: &str = "APP";
fn get_user_duo_data(uuid: &str, conn: &DbConn) -> DuoStatus {
let type_ = TwoFactorType::Duo as i32;
// If the user doesn't have an entry, disabled
let twofactor = match TwoFactor::find_by_user_and_type(uuid, type_, &conn) {
Some(t) => t,
None => return DuoStatus::Disabled(DuoData::global().is_some()),
};
// If the user has the required values, we use those
if let Ok(data) = serde_json::from_str(&twofactor.data) {
return DuoStatus::User(data);
}
// Otherwise, we try to use the globals
if let Some(global) = DuoData::global() {
return DuoStatus::Global(global);
}
// If there are no globals configured, just disable it
DuoStatus::Disabled(false)
}
// let (ik, sk, ak, host) = get_duo_keys();
fn get_duo_keys_email(email: &str, conn: &DbConn) -> ApiResult<(String, String, String, String)> {
let data = User::find_by_mail(email, &conn)
.and_then(|u| get_user_duo_data(&u.uuid, &conn).data())
.or_else(DuoData::global)
.map_res("Can't fetch Duo keys")?;
Ok((data.ik, data.sk, CONFIG.get_duo_akey(), data.host))
}
pub fn generate_duo_signature(email: &str, conn: &DbConn) -> ApiResult<(String, String)> {
let now = Utc::now().timestamp();
let (ik, sk, ak, host) = get_duo_keys_email(email, conn)?;
let duo_sign = sign_duo_values(&sk, email, &ik, DUO_PREFIX, now + DUO_EXPIRE);
let app_sign = sign_duo_values(&ak, email, &ik, APP_PREFIX, now + APP_EXPIRE);
Ok((format!("{}:{}", duo_sign, app_sign), host))
}
fn sign_duo_values(key: &str, email: &str, ikey: &str, prefix: &str, expire: i64) -> String {
let val = format!("{}|{}|{}", email, ikey, expire);
let cookie = format!("{}|{}", prefix, BASE64.encode(val.as_bytes()));
format!("{}|{}", cookie, crypto::hmac_sign(key, &cookie))
}
pub fn validate_duo_login(email: &str, response: &str, conn: &DbConn) -> EmptyResult {
let split: Vec<&str> = response.split(':').collect();
if split.len() != 2 {
err!("Invalid response length");
}
let auth_sig = split[0];
let app_sig = split[1];
let now = Utc::now().timestamp();
let (ik, sk, ak, _host) = get_duo_keys_email(email, conn)?;
let auth_user = parse_duo_values(&sk, auth_sig, &ik, AUTH_PREFIX, now)?;
let app_user = parse_duo_values(&ak, app_sig, &ik, APP_PREFIX, now)?;
if !crypto::ct_eq(&auth_user, app_user) || !crypto::ct_eq(&auth_user, email) {
err!("Error validating duo authentication")
}
Ok(())
}
fn parse_duo_values(key: &str, val: &str, ikey: &str, prefix: &str, time: i64) -> ApiResult<String> {
let split: Vec<&str> = val.split('|').collect();
if split.len() != 3 {
err!("Invalid value length")
}
let u_prefix = split[0];
let u_b64 = split[1];
let u_sig = split[2];
let sig = crypto::hmac_sign(key, &format!("{}|{}", u_prefix, u_b64));
if !crypto::ct_eq(crypto::hmac_sign(key, &sig), crypto::hmac_sign(key, u_sig)) {
err!("Duo signatures don't match")
}
if u_prefix != prefix {
err!("Prefixes don't match")
}
let cookie_vec = match BASE64.decode(u_b64.as_bytes()) {
Ok(c) => c,
Err(_) => err!("Invalid Duo cookie encoding"),
};
let cookie = match String::from_utf8(cookie_vec) {
Ok(c) => c,
Err(_) => err!("Invalid Duo cookie encoding"),
};
let cookie_split: Vec<&str> = cookie.split('|').collect();
if cookie_split.len() != 3 {
err!("Invalid cookie length")
}
let username = cookie_split[0];
let u_ikey = cookie_split[1];
let expire = cookie_split[2];
if !crypto::ct_eq(ikey, u_ikey) {
err!("Invalid ikey")
}
let expire = match expire.parse() {
Ok(e) => e,
Err(_) => err!("Invalid expire time"),
};
if time >= expire {
err!("Expired authorization")
}
Ok(username.into())
}
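Note how parse_duo_values never compares the received signature against the expected one directly: both are re-keyed through HMAC first, so the constant-time comparison runs over fixed-length digests derived from the key. A minimal sketch of the pattern, with the closures standing in for crate::crypto::hmac_sign and crate::crypto::ct_eq:

// Re-keying both sides means even an imperfect comparison leaks nothing
// useful about the expected signature itself.
fn sigs_match(
    key: &str,
    expected: &str,
    received: &str,
    hmac_sign: impl Fn(&str, &str) -> String,
    ct_eq: impl Fn(&str, &str) -> bool,
) -> bool {
    ct_eq(&hmac_sign(key, expected), &hmac_sign(key, received))
}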

View File

@@ -0,0 +1,340 @@
use rocket::Route;
use rocket_contrib::json::Json;
use serde_json;
use crate::api::{EmptyResult, JsonResult, JsonUpcase, PasswordData};
use crate::auth::Headers;
use crate::crypto;
use crate::db::{
models::{TwoFactor, TwoFactorType},
DbConn,
};
use crate::error::{Error, MapResult};
use crate::mail;
use crate::CONFIG;
use chrono::{Duration, NaiveDateTime, Utc};
use std::ops::Add;
pub fn routes() -> Vec<Route> {
routes![
get_email,
send_email_login,
send_email,
email,
]
}
#[derive(Deserialize)]
#[allow(non_snake_case)]
struct SendEmailLoginData {
Email: String,
MasterPasswordHash: String,
}
/// User is trying to login and wants to use email 2FA.
/// Does not require Bearer token
#[post("/two-factor/send-email-login", data = "<data>")] // JsonResult
fn send_email_login(data: JsonUpcase<SendEmailLoginData>, conn: DbConn) -> EmptyResult {
let data: SendEmailLoginData = data.into_inner().data;
use crate::db::models::User;
// Get the user
let user = match User::find_by_mail(&data.Email, &conn) {
Some(user) => user,
None => err!("Username or password is incorrect. Try again."),
};
// Check password
if !user.check_valid_password(&data.MasterPasswordHash) {
err!("Username or password is incorrect. Try again.")
}
if !CONFIG._enable_email_2fa() {
err!("Email 2FA is disabled")
}
let type_ = TwoFactorType::Email as i32;
let mut twofactor = TwoFactor::find_by_user_and_type(&user.uuid, type_, &conn).map_res("Two factor not found")?;
let generated_token = generate_token(CONFIG.email_token_size())?;
let mut twofactor_data = EmailTokenData::from_json(&twofactor.data)?;
twofactor_data.set_token(generated_token);
twofactor.data = twofactor_data.to_json();
twofactor.save(&conn)?;
mail::send_token(&twofactor_data.email, &twofactor_data.last_token.map_res("Token is empty")?)?;
Ok(())
}
/// When user clicks on Manage email 2FA show the user the related information
#[post("/two-factor/get-email", data = "<data>")]
fn get_email(data: JsonUpcase<PasswordData>, headers: Headers, conn: DbConn) -> JsonResult {
let data: PasswordData = data.into_inner().data;
let user = headers.user;
if !user.check_valid_password(&data.MasterPasswordHash) {
err!("Invalid password");
}
let type_ = TwoFactorType::Email as i32;
let enabled = match TwoFactor::find_by_user_and_type(&user.uuid, type_, &conn) {
Some(x) => x.enabled,
_ => false,
};
Ok(Json(json!({
"Email": user.email,
"Enabled": enabled,
"Object": "twoFactorEmail"
})))
}
#[derive(Deserialize)]
#[allow(non_snake_case)]
struct SendEmailData {
/// Email where 2FA codes will be sent to, can be different than user email account.
Email: String,
MasterPasswordHash: String,
}
fn generate_token(token_size: u32) -> Result<String, Error> {
if token_size > 19 {
err!("Generating token failed")
}
// 8 bytes to create an u64 for up to 19 token digits
let bytes = crypto::get_random(vec![0; 8]);
let mut bytes_array = [0u8; 8];
bytes_array.copy_from_slice(&bytes);
let number = u64::from_be_bytes(bytes_array) % 10u64.pow(token_size);
let token = format!("{:0size$}", number, size = token_size as usize);
Ok(token)
}
/// Send a verification email to the specified email address to check whether it exists/belongs to user.
#[post("/two-factor/send-email", data = "<data>")]
fn send_email(data: JsonUpcase<SendEmailData>, headers: Headers, conn: DbConn) -> EmptyResult {
let data: SendEmailData = data.into_inner().data;
let user = headers.user;
if !user.check_valid_password(&data.MasterPasswordHash) {
err!("Invalid password");
}
if !CONFIG._enable_email_2fa() {
err!("Email 2FA is disabled")
}
let type_ = TwoFactorType::Email as i32;
if let Some(tf) = TwoFactor::find_by_user_and_type(&user.uuid, type_, &conn) {
tf.delete(&conn)?;
}
let generated_token = generate_token(CONFIG.email_token_size())?;
let twofactor_data = EmailTokenData::new(data.Email, generated_token);
// Uses EmailVerificationChallenge as type to show that it's not verified yet.
let twofactor = TwoFactor::new(
user.uuid,
TwoFactorType::EmailVerificationChallenge,
twofactor_data.to_json(),
);
twofactor.save(&conn)?;
mail::send_token(&twofactor_data.email, &twofactor_data.last_token.map_res("Token is empty")?)?;
Ok(())
}
#[derive(Deserialize, Serialize)]
#[allow(non_snake_case)]
struct EmailData {
Email: String,
MasterPasswordHash: String,
Token: String,
}
/// Verify email belongs to user and can be used for 2FA email codes.
#[put("/two-factor/email", data = "<data>")]
fn email(data: JsonUpcase<EmailData>, headers: Headers, conn: DbConn) -> JsonResult {
let data: EmailData = data.into_inner().data;
let user = headers.user;
if !user.check_valid_password(&data.MasterPasswordHash) {
err!("Invalid password");
}
let type_ = TwoFactorType::EmailVerificationChallenge as i32;
let mut twofactor = TwoFactor::find_by_user_and_type(&user.uuid, type_, &conn).map_res("Two factor not found")?;
let mut email_data = EmailTokenData::from_json(&twofactor.data)?;
let issued_token = match &email_data.last_token {
Some(t) => t,
_ => err!("No token available"),
};
if !crypto::ct_eq(issued_token, data.Token) {
err!("Token is invalid")
}
email_data.reset_token();
twofactor.atype = TwoFactorType::Email as i32;
twofactor.data = email_data.to_json();
twofactor.save(&conn)?;
Ok(Json(json!({
"Email": email_data.email,
"Enabled": "true",
"Object": "twoFactorEmail"
})))
}
/// Validate the email code when used as TwoFactor token mechanism
pub fn validate_email_code_str(user_uuid: &str, token: &str, data: &str, conn: &DbConn) -> EmptyResult {
let mut email_data = EmailTokenData::from_json(&data)?;
let mut twofactor = TwoFactor::find_by_user_and_type(&user_uuid, TwoFactorType::Email as i32, &conn).map_res("Two factor not found")?;
let issued_token = match &email_data.last_token {
Some(t) => t,
_ => err!("No token available"),
};
if !crypto::ct_eq(issued_token, token) {
email_data.add_attempt();
if email_data.attempts >= CONFIG.email_attempts_limit() {
email_data.reset_token();
}
twofactor.data = email_data.to_json();
twofactor.save(&conn)?;
err!("Token is invalid")
}
email_data.reset_token();
twofactor.data = email_data.to_json();
twofactor.save(&conn)?;
let date = NaiveDateTime::from_timestamp(email_data.token_sent, 0);
let max_time = CONFIG.email_expiration_time() as i64;
if date.add(Duration::seconds(max_time)) < Utc::now().naive_utc() {
err!("Token has expired")
}
Ok(())
}
/// Data stored in the TwoFactor table in the db
#[derive(Serialize, Deserialize)]
pub struct EmailTokenData {
/// Email address where the token will be sent to. Can be different from account email.
pub email: String,
/// Some(token): last valid token issued that has not been entered.
/// None: valid token was used and removed.
pub last_token: Option<String>,
/// UNIX timestamp of token issue.
pub token_sent: i64,
/// Amount of token entry attempts for last_token.
pub attempts: u64,
}
impl EmailTokenData {
pub fn new(email: String, token: String) -> EmailTokenData {
EmailTokenData {
email,
last_token: Some(token),
token_sent: Utc::now().naive_utc().timestamp(),
attempts: 0,
}
}
pub fn set_token(&mut self, token: String) {
self.last_token = Some(token);
self.token_sent = Utc::now().naive_utc().timestamp();
}
pub fn reset_token(&mut self) {
self.last_token = None;
self.attempts = 0;
}
pub fn add_attempt(&mut self) {
self.attempts += 1;
}
pub fn to_json(&self) -> String {
serde_json::to_string(&self).unwrap()
}
pub fn from_json(string: &str) -> Result<EmailTokenData, Error> {
let res: Result<EmailTokenData, crate::serde_json::Error> = serde_json::from_str(&string);
match res {
Ok(x) => Ok(x),
Err(_) => err!("Could not decode EmailTokenData from string"),
}
}
}
/// Takes an email address and obscures it by replacing it with asterisks except two characters.
pub fn obscure_email(email: &str) -> String {
let split: Vec<&str> = email.split('@').collect();
let mut name = split[0].to_string();
let domain = &split[1];
let name_size = name.chars().count();
let new_name = match name_size {
1..=3 => "*".repeat(name_size),
_ => {
let stars = "*".repeat(name_size - 2);
name.truncate(2);
format!("{}{}", name, stars)
}
};
format!("{}@{}", new_name, &domain)
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_obscure_email_long() {
let email = "bytes@example.ext";
let result = obscure_email(&email);
// Only first two characters should be visible.
assert_eq!(result, "by***@example.ext");
}
#[test]
fn test_obscure_email_short() {
let email = "byt@example.ext";
let result = obscure_email(&email);
// If it's smaller than 3 characters it should only show asterisks.
assert_eq!(result, "***@example.ext");
}
#[test]
fn test_token() {
let result = generate_token(19).unwrap();
assert_eq!(result.chars().count(), 19);
}
#[test]
fn test_token_too_large() {
let result = generate_token(20);
assert!(result.is_err(), "too large token should give an error");
}
}
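generate_token caps token_size at 19 digits because the u64 built from 8 random bytes tops out near 1.8 * 10^19, so a 20-digit code could not be covered. A worked sketch of the same arithmetic for the common 6-digit case (helper name illustrative):

// 8 random bytes -> u64, reduced modulo 10^6 and left-padded with zeros,
// exactly the steps generate_token performs for token_size = 6.
fn six_digit_token(random: [u8; 8]) -> String {
    let n = u64::from_be_bytes(random) % 1_000_000;
    format!("{:06}", n) // e.g. [0u8; 8] yields "000000"
}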

View File

@@ -0,0 +1,146 @@
use data_encoding::BASE32;
use rocket::Route;
use rocket_contrib::json::Json;
use serde_json;
use serde_json::Value;
use crate::api::{JsonResult, JsonUpcase, NumberOrString, PasswordData};
use crate::auth::Headers;
use crate::crypto;
use crate::db::{
models::{TwoFactor, User},
DbConn,
};
pub(crate) mod authenticator;
pub(crate) mod duo;
pub(crate) mod email;
pub(crate) mod u2f;
pub(crate) mod yubikey;
pub fn routes() -> Vec<Route> {
let mut routes = routes![
get_twofactor,
get_recover,
recover,
disable_twofactor,
disable_twofactor_put,
];
routes.append(&mut authenticator::routes());
routes.append(&mut duo::routes());
routes.append(&mut email::routes());
routes.append(&mut u2f::routes());
routes.append(&mut yubikey::routes());
routes
}
#[get("/two-factor")]
fn get_twofactor(headers: Headers, conn: DbConn) -> JsonResult {
let twofactors = TwoFactor::find_by_user(&headers.user.uuid, &conn);
let twofactors_json: Vec<Value> = twofactors.iter().map(TwoFactor::to_json_list).collect();
Ok(Json(json!({
"Data": twofactors_json,
"Object": "list",
"ContinuationToken": null,
})))
}
#[post("/two-factor/get-recover", data = "<data>")]
fn get_recover(data: JsonUpcase<PasswordData>, headers: Headers) -> JsonResult {
let data: PasswordData = data.into_inner().data;
let user = headers.user;
if !user.check_valid_password(&data.MasterPasswordHash) {
err!("Invalid password");
}
Ok(Json(json!({
"Code": user.totp_recover,
"Object": "twoFactorRecover"
})))
}
#[derive(Deserialize)]
#[allow(non_snake_case)]
struct RecoverTwoFactor {
MasterPasswordHash: String,
Email: String,
RecoveryCode: String,
}
#[post("/two-factor/recover", data = "<data>")]
fn recover(data: JsonUpcase<RecoverTwoFactor>, conn: DbConn) -> JsonResult {
let data: RecoverTwoFactor = data.into_inner().data;
use crate::db::models::User;
// Get the user
let mut user = match User::find_by_mail(&data.Email, &conn) {
Some(user) => user,
None => err!("Username or password is incorrect. Try again."),
};
// Check password
if !user.check_valid_password(&data.MasterPasswordHash) {
err!("Username or password is incorrect. Try again.")
}
// Check if recovery code is correct
if !user.check_valid_recovery_code(&data.RecoveryCode) {
err!("Recovery code is incorrect. Try again.")
}
// Remove all twofactors from the user
TwoFactor::delete_all_by_user(&user.uuid, &conn)?;
// Remove the recovery code, not needed without twofactors
user.totp_recover = None;
user.save(&conn)?;
Ok(Json(json!({})))
}
fn _generate_recover_code(user: &mut User, conn: &DbConn) {
if user.totp_recover.is_none() {
let totp_recover = BASE32.encode(&crypto::get_random(vec![0u8; 20]));
user.totp_recover = Some(totp_recover);
user.save(conn).ok();
}
}
#[derive(Deserialize)]
#[allow(non_snake_case)]
struct DisableTwoFactorData {
MasterPasswordHash: String,
Type: NumberOrString,
}
#[post("/two-factor/disable", data = "<data>")]
fn disable_twofactor(data: JsonUpcase<DisableTwoFactorData>, headers: Headers, conn: DbConn) -> JsonResult {
let data: DisableTwoFactorData = data.into_inner().data;
let password_hash = data.MasterPasswordHash;
let user = headers.user;
if !user.check_valid_password(&password_hash) {
err!("Invalid password");
}
let type_ = data.Type.into_i32()?;
if let Some(twofactor) = TwoFactor::find_by_user_and_type(&user.uuid, type_, &conn) {
twofactor.delete(&conn)?;
}
Ok(Json(json!({
"Enabled": false,
"Type": type_,
"Object": "twoFactorProvider"
})))
}
#[put("/two-factor/disable", data = "<data>")]
fn disable_twofactor_put(data: JsonUpcase<DisableTwoFactorData>, headers: Headers, conn: DbConn) -> JsonResult {
disable_twofactor(data, headers, conn)
}
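routes() here only aggregates; each provider module contributes its own handlers. A sketch of how the combined list is consumed upstream (the "/api" mount point is an assumption for illustration):

// Rocket 0.4-style mounting, matching the routes!/Vec<Route> usage above.
fn build() -> rocket::Rocket {
    rocket::ignite().mount("/api", crate::api::core::two_factor::routes())
}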

View File

@@ -0,0 +1,315 @@
use rocket::Route;
use rocket_contrib::json::Json;
use serde_json;
use serde_json::Value;
use u2f::messages::{RegisterResponse, SignResponse, U2fSignRequest};
use u2f::protocol::{Challenge, U2f};
use u2f::register::Registration;
use crate::api::core::two_factor::_generate_recover_code;
use crate::api::{ApiResult, EmptyResult, JsonResult, JsonUpcase, NumberOrString, PasswordData};
use crate::auth::Headers;
use crate::db::{
models::{TwoFactor, TwoFactorType},
DbConn,
};
use crate::error::Error;
use crate::CONFIG;
const U2F_VERSION: &str = "U2F_V2";
lazy_static! {
static ref APP_ID: String = format!("{}/app-id.json", &CONFIG.domain());
static ref U2F: U2f = U2f::new(APP_ID.clone());
}
pub fn routes() -> Vec<Route> {
routes![
generate_u2f,
generate_u2f_challenge,
activate_u2f,
activate_u2f_put,
]
}
#[post("/two-factor/get-u2f", data = "<data>")]
fn generate_u2f(data: JsonUpcase<PasswordData>, headers: Headers, conn: DbConn) -> JsonResult {
if !CONFIG.domain_set() {
err!("`DOMAIN` environment variable is not set. U2F disabled")
}
let data: PasswordData = data.into_inner().data;
if !headers.user.check_valid_password(&data.MasterPasswordHash) {
err!("Invalid password");
}
let (enabled, keys) = get_u2f_registrations(&headers.user.uuid, &conn)?;
let keys_json: Vec<Value> = keys.iter().map(U2FRegistration::to_json).collect();
Ok(Json(json!({
"Enabled": enabled,
"Keys": keys_json,
"Object": "twoFactorU2f"
})))
}
#[post("/two-factor/get-u2f-challenge", data = "<data>")]
fn generate_u2f_challenge(data: JsonUpcase<PasswordData>, headers: Headers, conn: DbConn) -> JsonResult {
let data: PasswordData = data.into_inner().data;
if !headers.user.check_valid_password(&data.MasterPasswordHash) {
err!("Invalid password");
}
let _type = TwoFactorType::U2fRegisterChallenge;
let challenge = _create_u2f_challenge(&headers.user.uuid, _type, &conn).challenge;
Ok(Json(json!({
"UserId": headers.user.uuid,
"AppId": APP_ID.to_string(),
"Challenge": challenge,
"Version": U2F_VERSION,
})))
}
#[derive(Deserialize, Debug)]
#[allow(non_snake_case)]
struct EnableU2FData {
Id: NumberOrString, // 1..5
Name: String,
MasterPasswordHash: String,
DeviceResponse: String,
}
// This struct is referenced from the U2F lib
// because it doesn't implement Deserialize
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
#[serde(remote = "Registration")]
struct RegistrationDef {
key_handle: Vec<u8>,
pub_key: Vec<u8>,
attestation_cert: Option<Vec<u8>>,
}
#[derive(Serialize, Deserialize)]
struct U2FRegistration {
id: i32,
name: String,
#[serde(with = "RegistrationDef")]
reg: Registration,
counter: u32,
compromised: bool,
}
impl U2FRegistration {
fn to_json(&self) -> Value {
json!({
"Id": self.id,
"Name": self.name,
"Compromised": self.compromised,
})
}
}
// This struct is copied from the U2F lib
// to add an optional error code
#[derive(Deserialize)]
#[serde(rename_all = "camelCase")]
struct RegisterResponseCopy {
pub registration_data: String,
pub version: String,
pub client_data: String,
pub error_code: Option<NumberOrString>,
}
impl Into<RegisterResponse> for RegisterResponseCopy {
fn into(self) -> RegisterResponse {
RegisterResponse {
registration_data: self.registration_data,
version: self.version,
client_data: self.client_data,
}
}
}
#[post("/two-factor/u2f", data = "<data>")]
fn activate_u2f(data: JsonUpcase<EnableU2FData>, headers: Headers, conn: DbConn) -> JsonResult {
let data: EnableU2FData = data.into_inner().data;
let mut user = headers.user;
if !user.check_valid_password(&data.MasterPasswordHash) {
err!("Invalid password");
}
let tf_type = TwoFactorType::U2fRegisterChallenge as i32;
let tf_challenge = match TwoFactor::find_by_user_and_type(&user.uuid, tf_type, &conn) {
Some(c) => c,
None => err!("Can't recover challenge"),
};
let challenge: Challenge = serde_json::from_str(&tf_challenge.data)?;
tf_challenge.delete(&conn)?;
let response: RegisterResponseCopy = serde_json::from_str(&data.DeviceResponse)?;
let error_code = response
.error_code
.clone()
.map_or("0".into(), NumberOrString::into_string);
if error_code != "0" {
err!("Error registering U2F token")
}
let registration = U2F.register_response(challenge.clone(), response.into())?;
let full_registration = U2FRegistration {
id: data.Id.into_i32()?,
name: data.Name,
reg: registration,
compromised: false,
counter: 0,
};
let mut regs = get_u2f_registrations(&user.uuid, &conn)?.1;
// TODO: Check that there is no repeat Id
regs.push(full_registration);
save_u2f_registrations(&user.uuid, &regs, &conn)?;
_generate_recover_code(&mut user, &conn);
let keys_json: Vec<Value> = regs.iter().map(U2FRegistration::to_json).collect();
Ok(Json(json!({
"Enabled": true,
"Keys": keys_json,
"Object": "twoFactorU2f"
})))
}
#[put("/two-factor/u2f", data = "<data>")]
fn activate_u2f_put(data: JsonUpcase<EnableU2FData>, headers: Headers, conn: DbConn) -> JsonResult {
activate_u2f(data, headers, conn)
}
fn _create_u2f_challenge(user_uuid: &str, type_: TwoFactorType, conn: &DbConn) -> Challenge {
let challenge = U2F.generate_challenge().unwrap();
TwoFactor::new(user_uuid.into(), type_, serde_json::to_string(&challenge).unwrap())
.save(conn)
.expect("Error saving challenge");
challenge
}
fn save_u2f_registrations(user_uuid: &str, regs: &[U2FRegistration], conn: &DbConn) -> EmptyResult {
TwoFactor::new(user_uuid.into(), TwoFactorType::U2f, serde_json::to_string(regs)?).save(&conn)
}
fn get_u2f_registrations(user_uuid: &str, conn: &DbConn) -> Result<(bool, Vec<U2FRegistration>), Error> {
let type_ = TwoFactorType::U2f as i32;
let (enabled, regs) = match TwoFactor::find_by_user_and_type(user_uuid, type_, conn) {
Some(tf) => (tf.enabled, tf.data),
None => return Ok((false, Vec::new())), // If no data, return empty list
};
let data = match serde_json::from_str(&regs) {
Ok(d) => d,
Err(_) => {
// If error, try old format
let mut old_regs = _old_parse_registrations(&regs);
if old_regs.len() != 1 {
err!("The old U2F format only allows one device")
}
// Convert to new format
let new_regs = vec![U2FRegistration {
id: 1,
name: "Unnamed U2F key".into(),
reg: old_regs.remove(0),
compromised: false,
counter: 0,
}];
// Save new format
save_u2f_registrations(user_uuid, &new_regs, &conn)?;
new_regs
}
};
Ok((enabled, data))
}
fn _old_parse_registrations(registations: &str) -> Vec<Registration> {
#[derive(Deserialize)]
struct Helper(#[serde(with = "RegistrationDef")] Registration);
let regs: Vec<Value> = serde_json::from_str(registations).expect("Can't parse Registration data");
regs.into_iter()
.map(|r| serde_json::from_value(r).unwrap())
.map(|Helper(r)| r)
.collect()
}
pub fn generate_u2f_login(user_uuid: &str, conn: &DbConn) -> ApiResult<U2fSignRequest> {
let challenge = _create_u2f_challenge(user_uuid, TwoFactorType::U2fLoginChallenge, conn);
let registrations: Vec<_> = get_u2f_registrations(user_uuid, conn)?
.1
.into_iter()
.map(|r| r.reg)
.collect();
if registrations.is_empty() {
err!("No U2F devices registered")
}
Ok(U2F.sign_request(challenge, registrations))
}
pub fn validate_u2f_login(user_uuid: &str, response: &str, conn: &DbConn) -> EmptyResult {
let challenge_type = TwoFactorType::U2fLoginChallenge as i32;
let tf_challenge = TwoFactor::find_by_user_and_type(user_uuid, challenge_type, &conn);
let challenge = match tf_challenge {
Some(tf_challenge) => {
let challenge: Challenge = serde_json::from_str(&tf_challenge.data)?;
tf_challenge.delete(&conn)?;
challenge
}
None => err!("Can't recover login challenge"),
};
let response: SignResponse = serde_json::from_str(response)?;
let mut registrations = get_u2f_registrations(user_uuid, conn)?.1;
if registrations.is_empty() {
err!("No U2F devices registered")
}
for reg in &mut registrations {
let response = U2F.sign_response(challenge.clone(), reg.reg.clone(), response.clone(), reg.counter);
match response {
Ok(new_counter) => {
reg.counter = new_counter;
save_u2f_registrations(user_uuid, &registrations, &conn)?;
return Ok(());
}
Err(u2f::u2ferror::U2fError::CounterTooLow) => {
reg.compromised = true;
save_u2f_registrations(user_uuid, &registrations, &conn)?;
err!("This device might be compromised!");
}
Err(e) => {
warn!("E {:#}", e);
// break;
}
}
}
err!("error verifying response")
}
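get_u2f_registrations migrates stored data transparently: the old module kept a bare array of registrations (and supported only one device), while the new one wraps each registration with an id, name, signature counter, and compromised flag. A sketch of the two JSON shapes, with illustrative byte values:

use serde_json::json;

fn main() {
    // Old on-disk shape: a Vec<Registration> serialized directly.
    let old = json!([
        { "keyHandle": [1, 2, 3], "pubKey": [4, 5, 6], "attestationCert": null }
    ]);
    // New shape: a Vec<U2FRegistration>, one entry per named device.
    let new = json!([{
        "id": 1,
        "name": "Unnamed U2F key",
        "reg": { "keyHandle": [1, 2, 3], "pubKey": [4, 5, 6], "attestationCert": null },
        "counter": 0,
        "compromised": false
    }]);
    println!("{}\n{}", old, new);
}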

View File

@@ -0,0 +1,198 @@
use rocket::Route;
use rocket_contrib::json::Json;
use serde_json;
use serde_json::Value;
use yubico::config::Config;
use yubico::verify;

use crate::api::core::two_factor::_generate_recover_code;
use crate::api::{EmptyResult, JsonResult, JsonUpcase, PasswordData};
use crate::auth::Headers;
use crate::db::{
    models::{TwoFactor, TwoFactorType},
    DbConn,
};
use crate::error::{Error, MapResult};
use crate::CONFIG;

pub fn routes() -> Vec<Route> {
    routes![
        generate_yubikey,
        activate_yubikey,
        activate_yubikey_put,
    ]
}
#[derive(Deserialize, Debug)]
#[allow(non_snake_case)]
struct EnableYubikeyData {
    MasterPasswordHash: String,
    Key1: Option<String>,
    Key2: Option<String>,
    Key3: Option<String>,
    Key4: Option<String>,
    Key5: Option<String>,
    Nfc: bool,
}

#[derive(Deserialize, Serialize, Debug)]
#[allow(non_snake_case)]
pub struct YubikeyMetadata {
    Keys: Vec<String>,
    pub Nfc: bool,
}

fn parse_yubikeys(data: &EnableYubikeyData) -> Vec<String> {
    let data_keys = [&data.Key1, &data.Key2, &data.Key3, &data.Key4, &data.Key5];

    data_keys.iter().filter_map(|e| e.as_ref().cloned()).collect()
}

fn jsonify_yubikeys(yubikeys: Vec<String>) -> serde_json::Value {
    let mut result = json!({});

    for (i, key) in yubikeys.into_iter().enumerate() {
        result[format!("Key{}", i + 1)] = Value::String(key);
    }

    result
}
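For reference, jsonify_yubikeys rebuilds the Key1..Key5 object shape the Bitwarden clients expect from the stored key list. A self-contained usage sketch (the two key IDs below are made up):

use serde_json::{json, Value};

fn jsonify_yubikeys(yubikeys: Vec<String>) -> Value {
    let mut result = json!({});
    for (i, key) in yubikeys.into_iter().enumerate() {
        // Keys are numbered from 1, matching the upstream JSON field names.
        result[format!("Key{}", i + 1)] = Value::String(key);
    }
    result
}

fn main() {
    let v = jsonify_yubikeys(vec!["ccccccclulvj".into(), "cccccccbcjdi".into()]);
    assert_eq!(v["Key1"], "ccccccclulvj");
    assert_eq!(v["Key2"], "cccccccbcjdi");
}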
fn get_yubico_credentials() -> Result<(String, String), Error> {
    if !CONFIG._enable_yubico() {
        err!("Yubico support is disabled");
    }

    match (CONFIG.yubico_client_id(), CONFIG.yubico_secret_key()) {
        (Some(id), Some(secret)) => Ok((id, secret)),
        _ => err!("`YUBICO_CLIENT_ID` or `YUBICO_SECRET_KEY` environment variable is not set. Yubikey OTP Disabled"),
    }
}

fn verify_yubikey_otp(otp: String) -> EmptyResult {
    let (yubico_id, yubico_secret) = get_yubico_credentials()?;

    let config = Config::default().set_client_id(yubico_id).set_key(yubico_secret);

    match CONFIG.yubico_server() {
        Some(server) => verify(otp, config.set_api_hosts(vec![server])),
        None => verify(otp, config),
    }
    .map_res("Failed to verify OTP")
    .and(Ok(()))
}
#[post("/two-factor/get-yubikey", data = "<data>")]
fn generate_yubikey(data: JsonUpcase<PasswordData>, headers: Headers, conn: DbConn) -> JsonResult {
// Make sure the credentials are set
get_yubico_credentials()?;
let data: PasswordData = data.into_inner().data;
let user = headers.user;
if !user.check_valid_password(&data.MasterPasswordHash) {
err!("Invalid password");
}
let user_uuid = &user.uuid;
let yubikey_type = TwoFactorType::YubiKey as i32;
let r = TwoFactor::find_by_user_and_type(user_uuid, yubikey_type, &conn);
if let Some(r) = r {
let yubikey_metadata: YubikeyMetadata = serde_json::from_str(&r.data)?;
let mut result = jsonify_yubikeys(yubikey_metadata.Keys);
result["Enabled"] = Value::Bool(true);
result["Nfc"] = Value::Bool(yubikey_metadata.Nfc);
result["Object"] = Value::String("twoFactorU2f".to_owned());
Ok(Json(result))
} else {
Ok(Json(json!({
"Enabled": false,
"Object": "twoFactorU2f",
})))
}
}
#[post("/two-factor/yubikey", data = "<data>")]
fn activate_yubikey(data: JsonUpcase<EnableYubikeyData>, headers: Headers, conn: DbConn) -> JsonResult {
let data: EnableYubikeyData = data.into_inner().data;
let mut user = headers.user;
if !user.check_valid_password(&data.MasterPasswordHash) {
err!("Invalid password");
}
// Check if we already have some data
let mut yubikey_data = match TwoFactor::find_by_user_and_type(&user.uuid, TwoFactorType::YubiKey as i32, &conn) {
Some(data) => data,
None => TwoFactor::new(user.uuid.clone(), TwoFactorType::YubiKey, String::new()),
};
let yubikeys = parse_yubikeys(&data);
if yubikeys.is_empty() {
return Ok(Json(json!({
"Enabled": false,
"Object": "twoFactorU2f",
})));
}
// Ensure they are valid OTPs
for yubikey in &yubikeys {
if yubikey.len() == 12 {
// YubiKey ID
continue;
}
verify_yubikey_otp(yubikey.to_owned()).map_res("Invalid Yubikey OTP provided")?;
}
let yubikey_ids: Vec<String> = yubikeys.into_iter().map(|x| (&x[..12]).to_owned()).collect();
let yubikey_metadata = YubikeyMetadata {
Keys: yubikey_ids,
Nfc: data.Nfc,
};
yubikey_data.data = serde_json::to_string(&yubikey_metadata).unwrap();
yubikey_data.save(&conn)?;
_generate_recover_code(&mut user, &conn);
let mut result = jsonify_yubikeys(yubikey_metadata.Keys);
result["Enabled"] = Value::Bool(true);
result["Nfc"] = Value::Bool(yubikey_metadata.Nfc);
result["Object"] = Value::String("twoFactorU2f".to_owned());
Ok(Json(result))
}
#[put("/two-factor/yubikey", data = "<data>")]
fn activate_yubikey_put(data: JsonUpcase<EnableYubikeyData>, headers: Headers, conn: DbConn) -> JsonResult {
activate_yubikey(data, headers, conn)
}
pub fn validate_yubikey_login(response: &str, twofactor_data: &str) -> EmptyResult {
if response.len() != 44 {
err!("Invalid Yubikey OTP length");
}
let yubikey_metadata: YubikeyMetadata = serde_json::from_str(twofactor_data).expect("Can't parse Yubikey Metadata");
let response_id = &response[..12];
if !yubikey_metadata.Keys.contains(&response_id.to_owned()) {
err!("Given Yubikey is not registered");
}
let result = verify_yubikey_otp(response.to_owned());
match result {
Ok(_answer) => Ok(()),
Err(_e) => err!("Failed to verify Yubikey against OTP server"),
}
}
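validate_yubikey_login relies on the fixed YubiKey OTP layout: every OTP is 44 ModHex characters, and the first 12 are a stable public ID identifying the key, which is also why activate_yubikey may store either a bare 12-character ID or the first 12 characters of a full OTP. A hedged sketch of that extraction (the OTP string below is fabricated, only shaped like a real one):

fn public_id(otp: &str) -> Option<&str> {
    // Real OTPs are exactly 44 ModHex characters; the first 12 identify the key.
    if otp.len() != 44 {
        return None;
    }
    Some(&otp[..12])
}

fn main() {
    let otp = "ccccccclulvjtugnjuuekbkejhijfcgidviuctntvnkr";
    assert_eq!(otp.len(), 44);
    assert_eq!(public_id(otp), Some("ccccccclulvj"));
}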

View File

@@ -1,14 +1,15 @@
 use std::fs::{create_dir_all, remove_file, symlink_metadata, File};
 use std::io::prelude::*;
+use std::net::ToSocketAddrs;
 use std::time::{Duration, SystemTime};
 use rocket::http::ContentType;
 use rocket::response::Content;
 use rocket::Route;
-use reqwest::{header::HeaderMap, Client, Response};
+use reqwest::{header::HeaderMap, Client, Response, Url};
-use rocket::http::{Cookie};
+use rocket::http::Cookie;
 use regex::Regex;
 use soup::prelude::*;
@@ -22,22 +23,46 @@ pub fn routes() -> Vec<Route> {
 const FALLBACK_ICON: &[u8; 344] = include_bytes!("../static/fallback-icon.png");
+const ALLOWED_CHARS: &str = "_-.";
 lazy_static! {
     // Reuse the client between requests
     static ref CLIENT: Client = Client::builder()
+        .use_sys_proxy()
         .gzip(true)
-        .timeout(Duration::from_secs(5))
+        .timeout(Duration::from_secs(CONFIG.icon_download_timeout()))
         .default_headers(_header_map())
         .build()
         .unwrap();
 }
+fn is_valid_domain(domain: &str) -> bool {
+    // Don't allow empty or too big domains or path traversal
+    if domain.is_empty() || domain.len() > 255 || domain.contains("..") {
+        return false;
+    }
+    // Only alphanumeric or specific characters
+    for c in domain.chars() {
+        if !c.is_alphanumeric() && !ALLOWED_CHARS.contains(c) {
+            return false;
+        }
+    }
+    true
+}
 #[get("/<domain>/icon.png")]
 fn icon(domain: String) -> Content<Vec<u8>> {
     let icon_type = ContentType::new("image", "x-icon");
-    // Validate the domain to avoid directory traversal attacks
-    if domain.contains('/') || domain.contains("..") {
+    if !is_valid_domain(&domain) {
+        warn!("Invalid domain: {:#?}", domain);
+        return Content(icon_type, FALLBACK_ICON.to_vec());
+    }
+    if check_icon_domain_is_blacklisted(&domain) {
+        warn!("Domain is blacklisted: {:#?}", domain);
         return Content(icon_type, FALLBACK_ICON.to_vec());
     }
@@ -46,6 +71,37 @@ fn icon(domain: String) -> Content<Vec<u8>> {
     Content(icon_type, icon)
 }
+fn check_icon_domain_is_blacklisted(domain: &str) -> bool {
+    let mut is_blacklisted = false;
+    if CONFIG.icon_blacklist_non_global_ips() {
+        is_blacklisted = (domain, 0)
+            .to_socket_addrs()
+            .map(|x| {
+                for ip_port in x {
+                    if !ip_port.ip().is_global() {
+                        warn!("IP {} for domain '{}' is not a global IP!", ip_port.ip(), domain);
+                        return true;
+                    }
+                }
+                false
+            })
+            .unwrap_or(false);
+    }
+    // Skip the regex check if the previous one is true already
+    if !is_blacklisted {
+        if let Some(blacklist) = CONFIG.icon_blacklist_regex() {
+            let regex = Regex::new(&blacklist).expect("Valid Regex");
+            if regex.is_match(&domain) {
+                warn!("Blacklisted domain: {:#?} matched {:#?}", domain, blacklist);
+                is_blacklisted = true;
+            }
+        }
+    }
+    is_blacklisted
+}
 fn get_icon(domain: &str) -> Vec<u8> {
     let path = format!("{}/{}.png", CONFIG.icon_cache_folder(), domain);
@@ -132,11 +188,17 @@ fn icon_is_expired(path: &str) -> bool {
 }
 #[derive(Debug)]
-struct IconList {
+struct Icon {
     priority: u8,
     href: String,
 }
+impl Icon {
+    fn new(priority: u8, href: String) -> Self {
+        Self { href, priority }
+    }
+}
 /// Returns a Result/Tuple which holds a Vector IconList and a string which holds the cookies from the last response.
 /// There will always be a result with a string which will contain https://example.com/favicon.ico and an empty string for the cookies.
 /// This does not mean that that location does exists, but it is the default location browser use.
@@ -149,13 +211,13 @@ struct IconList {
 /// let (mut iconlist, cookie_str) = get_icon_url("github.com")?;
 /// let (mut iconlist, cookie_str) = get_icon_url("gitlab.com")?;
 /// ```
-fn get_icon_url(domain: &str) -> Result<(Vec<IconList>, String), Error> {
+fn get_icon_url(domain: &str) -> Result<(Vec<Icon>, String), Error> {
     // Default URL with secure and insecure schemes
     let ssldomain = format!("https://{}", domain);
     let httpdomain = format!("http://{}", domain);
     // Create the iconlist
-    let mut iconlist: Vec<IconList> = Vec::new();
+    let mut iconlist: Vec<Icon> = Vec::new();
     // Create the cookie_str to fill it all the cookies from the response
     // These cookies can be used to request/download the favicon image.
@@ -166,14 +228,22 @@ fn get_icon_url(domain: &str) -> Result<(Vec<IconList>, String), Error> {
     if let Ok(content) = resp {
         // Extract the URL from the respose in case redirects occured (like @ gitlab.com)
         let url = content.url().clone();
         let raw_cookies = content.headers().get_all("set-cookie");
-        cookie_str = raw_cookies.iter().map(|raw_cookie| {
-            let cookie = Cookie::parse(raw_cookie.to_str().unwrap_or_default()).unwrap();
-            format!("{}={}; ", cookie.name(), cookie.value())
-        }).collect::<String>();
+        cookie_str = raw_cookies
+            .iter()
+            .filter_map(|raw_cookie| raw_cookie.to_str().ok())
+            .map(|cookie_str| {
+                if let Ok(cookie) = Cookie::parse(cookie_str) {
+                    format!("{}={}; ", cookie.name(), cookie.value())
+                } else {
+                    String::new()
+                }
+            })
+            .collect::<String>();
         // Add the default favicon.ico to the list with the domain the content responded from.
-        iconlist.push(IconList { priority: 35, href: url.join("/favicon.ico").unwrap().into_string() });
+        iconlist.push(Icon::new(35, url.join("/favicon.ico").unwrap().into_string()));
         let soup = Soup::from_reader(content)?;
         // Search for and filter
@@ -185,15 +255,17 @@ fn get_icon_url(domain: &str) -> Result<(Vec<IconList>, String), Error> {
         // Loop through all the found icons and determine it's priority
         for favicon in favicons {
-            let sizes = favicon.get("sizes").unwrap_or_default();
-            let href = url.join(&favicon.get("href").unwrap_or_default()).unwrap().into_string();
-            let priority = get_icon_priority(&href, &sizes);
-            iconlist.push(IconList { priority, href })
+            let sizes = favicon.get("sizes");
+            let href = favicon.get("href").expect("Missing href");
+            let full_href = url.join(&href).unwrap().into_string();
+            let priority = get_icon_priority(&full_href, sizes);
+            iconlist.push(Icon::new(priority, full_href))
         }
     } else {
         // Add the default favicon.ico to the list with just the given domain
-        iconlist.push(IconList { priority: 35, href: format!("{}/favicon.ico", ssldomain) });
+        iconlist.push(Icon::new(35, format!("{}/favicon.ico", ssldomain)));
     }
     // Sort the iconlist by priority
@@ -204,12 +276,28 @@ fn get_icon_url(domain: &str) -> Result<(Vec<IconList>, String), Error> {
 }
 fn get_page(url: &str) -> Result<Response, Error> {
-    //CLIENT.get(url).send()?.error_for_status().map_err(Into::into)
     get_page_with_cookies(url, "")
 }
 fn get_page_with_cookies(url: &str, cookie_str: &str) -> Result<Response, Error> {
-    CLIENT.get(url).header("cookie", cookie_str).send()?.error_for_status().map_err(Into::into)
+    if check_icon_domain_is_blacklisted(Url::parse(url).unwrap().host_str().unwrap_or_default()) {
+        err!("Favicon rel linked to a non blacklisted domain!");
+    }
+    if cookie_str.is_empty() {
+        CLIENT
+            .get(url)
+            .send()?
+            .error_for_status()
+            .map_err(Into::into)
+    } else {
+        CLIENT
+            .get(url)
+            .header("cookie", cookie_str)
+            .send()?
+            .error_for_status()
+            .map_err(Into::into)
+    }
 }
 /// Returns a Integer with the priority of the type of the icon which to prefer.
@@ -224,7 +312,7 @@ fn get_page_with_cookies(url: &str, cookie_str: &str) -> Result<Response, Error>
 /// priority1 = get_icon_priority("http://example.com/path/to/a/favicon.png", "32x32");
 /// priority2 = get_icon_priority("https://example.com/path/to/a/favicon.ico", "");
 /// ```
-fn get_icon_priority(href: &str, sizes: &str) -> u8 {
+fn get_icon_priority(href: &str, sizes: Option<String>) -> u8 {
     // Check if there is a dimension set
     let (width, height) = parse_sizes(sizes);
@@ -272,19 +360,19 @@ fn get_icon_priority(href: &str, sizes: &str) -> u8 {
 /// let (width, height) = parse_sizes("x128x128"); // (128, 128)
 /// let (width, height) = parse_sizes("32"); // (0, 0)
 /// ```
-fn parse_sizes(sizes: &str) -> (u16, u16) {
+fn parse_sizes(sizes: Option<String>) -> (u16, u16) {
     let mut width: u16 = 0;
     let mut height: u16 = 0;
-    if !sizes.is_empty() {
+    if let Some(sizes) = sizes {
         match Regex::new(r"(?x)(\d+)\D*(\d+)").unwrap().captures(sizes.trim()) {
-            None => {},
+            None => {}
             Some(dimensions) => {
                 if dimensions.len() >= 3 {
                     width = dimensions[1].parse::<u16>().unwrap_or_default();
                     height = dimensions[2].parse::<u16>().unwrap_or_default();
                 }
-            },
+            }
         }
     }
@@ -292,21 +380,18 @@ fn parse_sizes(sizes: &str) -> (u16, u16) {
 }
 fn download_icon(domain: &str) -> Result<Vec<u8>, Error> {
-    let (mut iconlist, cookie_str) = get_icon_url(&domain)?;
+    let (iconlist, cookie_str) = get_icon_url(&domain)?;
     let mut buffer = Vec::new();
-    iconlist.truncate(5);
-    for icon in iconlist {
-        let url = icon.href;
-        info!("Downloading icon for {} via {}...", domain, url);
-        match get_page_with_cookies(&url, &cookie_str) {
+    for icon in iconlist.iter().take(5) {
+        match get_page_with_cookies(&icon.href, &cookie_str) {
             Ok(mut res) => {
-                info!("Download finished for {}", url);
+                info!("Downloaded icon from {}", icon.href);
                 res.copy_to(&mut buffer)?;
                 break;
-            },
-            Err(_) => info!("Download failed for {}", url),
+            }
+            Err(_) => info!("Download failed for {}", icon.href),
         };
     }
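The non-global-IP blacklist above leans on Ipv4Addr/Ipv6Addr::is_global(), which was still a nightly-only API at the time. A rough stable-Rust approximation of the same idea using only std (the helper name and the reduced predicate set are assumptions; the real check covers the full reserved-address list):

use std::net::{IpAddr, ToSocketAddrs};

fn is_non_global(ip: IpAddr) -> bool {
    match ip {
        // Approximation: private, loopback, link-local and broadcast ranges for IPv4.
        IpAddr::V4(v4) => v4.is_private() || v4.is_loopback() || v4.is_link_local() || v4.is_broadcast(),
        IpAddr::V6(v6) => v6.is_loopback(),
    }
}

fn main() -> std::io::Result<()> {
    for addr in ("localhost", 443).to_socket_addrs()? {
        println!("{} -> non-global: {}", addr.ip(), is_non_global(addr.ip()));
    }
    Ok(())
}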

View File

@@ -1,20 +1,17 @@
 use rocket::request::{Form, FormItems, FromForm};
 use rocket::Route;
 use rocket_contrib::json::Json;
 use serde_json::Value;
-use num_traits::FromPrimitive;
+use num_traits::FromPrimitive;
+use crate::api::core::two_factor::email::EmailTokenData;
+use crate::api::core::two_factor::{duo, email, yubikey};
+use crate::api::{ApiResult, EmptyResult, JsonResult};
+use crate::auth::ClientIp;
 use crate::db::models::*;
 use crate::db::DbConn;
-use crate::util::{self, JsonMap};
-use crate::api::{ApiResult, EmptyResult, JsonResult};
-use crate::auth::ClientIp;
+use crate::mail;
+use crate::util;
 use crate::CONFIG;
 pub fn routes() -> Vec<Route> {
@@ -68,7 +65,7 @@ fn _refresh_login(data: ConnectData, conn: DbConn) -> JsonResult {
         "expires_in": expires_in,
         "token_type": "Bearer",
         "refresh_token": device.refresh_token,
-        "Key": user.key,
+        "Key": user.akey,
         "PrivateKey": user.private_key,
     })))
 }
@@ -99,26 +96,19 @@ fn _password_login(data: ConnectData, conn: DbConn, ip: ClientIp) -> JsonResult
         )
     }
-    // On iOS, device_type sends "iOS", on others it sends a number
-    let device_type = util::try_parse_string(data.device_type.as_ref()).unwrap_or(0);
-    let device_id = data.device_identifier.clone().expect("No device id provided");
-    let device_name = data.device_name.clone().expect("No device name provided");
-    // Find device or create new
-    let mut device = match Device::find_by_uuid(&device_id, &conn) {
-        Some(device) => {
-            // Check if owned device, and recreate if not
-            if device.user_uuid != user.uuid {
-                info!("Device exists but is owned by another user. The old device will be discarded");
-                Device::new(device_id, user.uuid.clone(), device_name, device_type)
-            } else {
-                device
-            }
-        }
-        None => Device::new(device_id, user.uuid.clone(), device_name, device_type),
-    };
-    let twofactor_token = twofactor_auth(&user.uuid, &data.clone(), &mut device, &conn)?;
+    let (mut device, new_device) = get_device(&data, &conn, &user);
+    let twofactor_token = twofactor_auth(&user.uuid, &data, &mut device, &conn)?;
+    if CONFIG.mail_enabled() && new_device {
+        if let Err(e) = mail::send_new_device_logged_in(&user.email, &ip.ip.to_string(), &device.updated_at, &device.name) {
+            error!("Error sending new device email: {:#?}", e);
+            if CONFIG.require_device_email() {
+                err!("Could not send login notification email. Please contact your administrator.")
+            }
+        }
+    }
     // Common
     let user = User::find_by_uuid(&device.user_uuid, &conn).unwrap();
@@ -132,7 +122,7 @@ fn _password_login(data: ConnectData, conn: DbConn, ip: ClientIp) -> JsonResult
         "expires_in": expires_in,
         "token_type": "Bearer",
         "refresh_token": device.refresh_token,
-        "Key": user.key,
+        "Key": user.akey,
         "PrivateKey": user.private_key,
         //"TwoFactorToken": "11122233333444555666777888999"
     });
@@ -145,6 +135,35 @@ fn _password_login(data: ConnectData, conn: DbConn, ip: ClientIp) -> JsonResult
     Ok(Json(result))
 }
+/// Retrieves an existing device or creates a new device from ConnectData and the User
+fn get_device(data: &ConnectData, conn: &DbConn, user: &User) -> (Device, bool) {
+    // On iOS, device_type sends "iOS", on others it sends a number
+    let device_type = util::try_parse_string(data.device_type.as_ref()).unwrap_or(0);
+    let device_id = data.device_identifier.clone().expect("No device id provided");
+    let device_name = data.device_name.clone().expect("No device name provided");
+    let mut new_device = false;
+    // Find device or create new
+    let device = match Device::find_by_uuid(&device_id, &conn) {
+        Some(device) => {
+            // Check if owned device, and recreate if not
+            if device.user_uuid != user.uuid {
+                info!("Device exists but is owned by another user. The old device will be discarded");
+                new_device = true;
+                Device::new(device_id, user.uuid.clone(), device_name, device_type)
+            } else {
+                device
+            }
+        }
+        None => {
+            new_device = true;
+            Device::new(device_id, user.uuid.clone(), device_name, device_type)
+        }
+    };
+    (device, new_device)
+}
 fn twofactor_auth(
     user_uuid: &str,
     data: &ConnectData,
@@ -152,62 +171,50 @@ fn twofactor_auth(
     conn: &DbConn,
 ) -> ApiResult<Option<String>> {
     let twofactors = TwoFactor::find_by_user(user_uuid, conn);
-    let providers: Vec<_> = twofactors.iter().map(|tf| tf.type_).collect();
     // No twofactor token if twofactor is disabled
     if twofactors.is_empty() {
         return Ok(None);
     }
-    let provider = data.two_factor_provider.unwrap_or(providers[0]); // If we aren't given a two factor provider, asume the first one
+    let twofactor_ids: Vec<_> = twofactors.iter().map(|tf| tf.atype).collect();
+    let selected_id = data.two_factor_provider.unwrap_or(twofactor_ids[0]); // If we aren't given a two factor provider, asume the first one
     let twofactor_code = match data.two_factor_token {
         Some(ref code) => code,
-        None => err_json!(_json_err_twofactor(&providers, user_uuid, conn)?),
+        None => err_json!(_json_err_twofactor(&twofactor_ids, user_uuid, conn)?),
     };
-    let twofactor = twofactors.iter().filter(|tf| tf.type_ == provider).nth(0);
+    let selected_twofactor = twofactors
+        .into_iter()
+        .filter(|tf| tf.atype == selected_id && tf.enabled)
+        .nth(0);
+    use crate::api::core::two_factor as _tf;
+    use crate::crypto::ct_eq;
+    let selected_data = _selected_data(selected_twofactor);
+    let mut remember = data.two_factor_remember.unwrap_or(0);
-    match TwoFactorType::from_i32(provider) {
+    match TwoFactorType::from_i32(selected_id) {
+        Some(TwoFactorType::Authenticator) => _tf::authenticator::validate_totp_code_str(twofactor_code, &selected_data?)?,
+        Some(TwoFactorType::U2f) => _tf::u2f::validate_u2f_login(user_uuid, twofactor_code, conn)?,
+        Some(TwoFactorType::YubiKey) => _tf::yubikey::validate_yubikey_login(twofactor_code, &selected_data?)?,
+        Some(TwoFactorType::Duo) => _tf::duo::validate_duo_login(data.username.as_ref().unwrap(), twofactor_code, conn)?,
+        Some(TwoFactorType::Email) => _tf::email::validate_email_code_str(user_uuid, twofactor_code, &selected_data?, conn)?,
         Some(TwoFactorType::Remember) => {
             match device.twofactor_remember {
-                Some(ref remember) if remember == twofactor_code => return Ok(None), // No twofactor token needed here
-                _ => err_json!(_json_err_twofactor(&providers, user_uuid, conn)?),
+                Some(ref code) if !CONFIG.disable_2fa_remember() && ct_eq(code, twofactor_code) => {
+                    remember = 1; // Make sure we also return the token here, otherwise it will only remember the first time
+                }
+                _ => err_json!(_json_err_twofactor(&twofactor_ids, user_uuid, conn)?),
             }
         }
-        Some(TwoFactorType::Authenticator) => {
-            let twofactor = match twofactor {
-                Some(tf) => tf,
-                None => err!("TOTP not enabled"),
-            };
-            let totp_code: u64 = match twofactor_code.parse() {
-                Ok(code) => code,
-                _ => err!("Invalid TOTP code"),
-            };
-            if !twofactor.check_totp_code(totp_code) {
-                err_json!(_json_err_twofactor(&providers, user_uuid, conn)?)
-            }
-        }
-        Some(TwoFactorType::U2f) => {
-            use crate::api::core::two_factor;
-            two_factor::validate_u2f_login(user_uuid, &twofactor_code, conn)?;
-        }
-        Some(TwoFactorType::YubiKey) => {
-            use crate::api::core::two_factor;
-            two_factor::validate_yubikey_login(user_uuid, twofactor_code, conn)?;
-        }
         _ => err!("Invalid two factor provider"),
     }
-    if data.two_factor_remember.unwrap_or(0) == 1 {
+    if !CONFIG.disable_2fa_remember() && remember == 1 {
         Ok(Some(device.refresh_twofactor_remember()))
     } else {
         device.delete_twofactor_remember();
@@ -215,6 +222,13 @@ fn twofactor_auth(
     }
 }
+fn _selected_data(tf: Option<TwoFactor>) -> ApiResult<String> {
+    match tf {
+        Some(tf) => Ok(tf.data),
+        None => err!("Two factor doesn't exist"),
+    }
+}
 fn _json_err_twofactor(providers: &[i32], user_uuid: &str, conn: &DbConn) -> ApiResult<Value> {
     use crate::api::core::two_factor;
@@ -232,26 +246,37 @@ fn _json_err_twofactor(providers: &[i32], user_uuid: &str, conn: &DbConn) -> Api
         Some(TwoFactorType::Authenticator) => { /* Nothing to do for TOTP */ }
         Some(TwoFactorType::U2f) if CONFIG.domain_set() => {
-            let request = two_factor::generate_u2f_login(user_uuid, conn)?;
+            let request = two_factor::u2f::generate_u2f_login(user_uuid, conn)?;
             let mut challenge_list = Vec::new();
             for key in request.registered_keys {
-                let mut challenge_map = JsonMap::new();
-                challenge_map.insert("appId".into(), Value::String(request.app_id.clone()));
-                challenge_map.insert("challenge".into(), Value::String(request.challenge.clone()));
-                challenge_map.insert("version".into(), Value::String(key.version));
-                challenge_map.insert("keyHandle".into(), Value::String(key.key_handle.unwrap_or_default()));
-                challenge_list.push(Value::Object(challenge_map));
+                challenge_list.push(json!({
+                    "appId": request.app_id,
+                    "challenge": request.challenge,
+                    "version": key.version,
+                    "keyHandle": key.key_handle,
+                }));
             }
-            let mut map = JsonMap::new();
-            use serde_json;
             let challenge_list_str = serde_json::to_string(&challenge_list).unwrap();
-            map.insert("Challenges".into(), Value::String(challenge_list_str));
-            result["TwoFactorProviders2"][provider.to_string()] = Value::Object(map);
+            result["TwoFactorProviders2"][provider.to_string()] = json!({
+                "Challenges": challenge_list_str,
+            });
+        }
+        Some(TwoFactorType::Duo) => {
+            let email = match User::find_by_uuid(user_uuid, &conn) {
+                Some(u) => u.email,
+                None => err!("User does not exist"),
+            };
+            let (signature, host) = duo::generate_duo_signature(&email, conn)?;
+            result["TwoFactorProviders2"][provider.to_string()] = json!({
+                "Host": host,
+                "Signature": signature,
+            });
         }
         Some(tf_type @ TwoFactorType::YubiKey) => {
@@ -260,12 +285,24 @@ fn _json_err_twofactor(providers: &[i32], user_uuid: &str, conn: &DbConn) -> Api
                 None => err!("No YubiKey devices registered"),
             };
-            let yubikey_metadata: two_factor::YubikeyMetadata =
-                serde_json::from_str(&twofactor.data).expect("Can't parse Yubikey Metadata");
+            let yubikey_metadata: yubikey::YubikeyMetadata = serde_json::from_str(&twofactor.data)?;
-            let mut map = JsonMap::new();
-            map.insert("Nfc".into(), Value::Bool(yubikey_metadata.Nfc));
-            result["TwoFactorProviders2"][provider.to_string()] = Value::Object(map);
+            result["TwoFactorProviders2"][provider.to_string()] = json!({
+                "Nfc": yubikey_metadata.Nfc,
+            })
+        }
+        Some(tf_type @ TwoFactorType::Email) => {
+            let twofactor = match TwoFactor::find_by_user_and_type(user_uuid, tf_type as i32, &conn) {
+                Some(tf) => tf,
+                None => err!("No twofactor email registered"),
+            };
+            let email_data = EmailTokenData::from_json(&twofactor.data)?;
+            result["TwoFactorProviders2"][provider.to_string()] = json!({
+                "Email": email::obscure_email(&email_data.email),
+            })
         }
         _ => {}
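The numeric ids matched by TwoFactorType::from_i32 above mirror Bitwarden's TwoFactorProviderType enumeration. A minimal sketch of that dispatch table (the exact variant-to-number mapping is stated here as an assumption based on the upstream clients, not taken from this diff):

#[derive(Debug, PartialEq)]
enum TwoFactorType {
    Authenticator, // 0
    YubiKey,       // 3
    U2f,           // 4
    Remember,      // 5
}

fn from_i32(n: i32) -> Option<TwoFactorType> {
    match n {
        0 => Some(TwoFactorType::Authenticator),
        3 => Some(TwoFactorType::YubiKey),
        4 => Some(TwoFactorType::U2f),
        5 => Some(TwoFactorType::Remember),
        _ => None, // unknown provider: "Invalid two factor provider"
    }
}

fn main() {
    assert_eq!(from_i32(4), Some(TwoFactorType::U2f));
    assert_eq!(from_i32(99), None);
}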

View File

@@ -47,10 +47,13 @@ impl NumberOrString {
         }
     }
-    fn into_i32(self) -> Option<i32> {
+    fn into_i32(self) -> ApiResult<i32> {
+        use std::num::ParseIntError as PIE;
         match self {
-            NumberOrString::Number(n) => Some(n),
-            NumberOrString::String(s) => s.parse().ok(),
+            NumberOrString::Number(n) => Ok(n),
+            NumberOrString::String(s) => s
+                .parse()
+                .map_err(|e: PIE| crate::Error::new("Can't convert to number", e.to_string())),
         }
     }
 }
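The new into_i32 surfaces parse failures instead of swallowing them in an Option. A dependency-free sketch of the same behavior change (error type simplified to String for illustration):

fn into_i32(s: &str) -> Result<i32, String> {
    // Previously: s.parse().ok() silently mapped bad input to None.
    s.parse::<i32>()
        .map_err(|e| format!("Can't convert to number: {}", e))
}

fn main() {
    assert_eq!(into_i32("42"), Ok(42));
    assert!(into_i32("4x2").is_err());
}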

View File

@@ -88,7 +88,7 @@ fn serialize(val: Value) -> Vec<u8> {
 fn serialize_date(date: NaiveDateTime) -> Value {
     let seconds: i64 = date.timestamp();
-    let nanos: i64 = date.timestamp_subsec_nanos() as i64;
+    let nanos: i64 = date.timestamp_subsec_nanos().into();
     let timestamp = nanos << 34 | seconds;
     let bs = timestamp.to_be_bytes();
@@ -230,7 +230,7 @@ pub struct WebSocketUsers {
 }
 impl WebSocketUsers {
-    fn send_update(&self, user_uuid: &String, data: &[u8]) -> ws::Result<()> {
+    fn send_update(&self, user_uuid: &str, data: &[u8]) -> ws::Result<()> {
         if let Some(user) = self.map.get(user_uuid) {
             for sender in user.iter() {
                 sender.send(data)?;
@@ -249,7 +249,7 @@ impl WebSocketUsers {
             ut,
         );
-        self.send_update(&user.uuid.clone(), &data).ok();
+        self.send_update(&user.uuid, &data).ok();
     }
     pub fn send_folder_update(&self, ut: UpdateType, folder: &Folder) {
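serialize_date above packs what appears to be a MessagePack timestamp-64 value: up to 30 bits of nanoseconds shifted above a 34-bit second count, written big-endian. A standalone sketch of the packing (the function name and sample values are illustrative):

fn timestamp64(seconds: i64, nanos: i64) -> [u8; 8] {
    // nanos must fit in 30 bits and seconds in 34 bits for this layout.
    ((nanos << 34) | seconds).to_be_bytes()
}

fn main() {
    let bs = timestamp64(1_570_000_000, 500);
    println!("{:02x?}", bs);
}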

View File

@@ -1,4 +1,3 @@
-use std::io;
 use std::path::{Path, PathBuf};
 use rocket::http::ContentType;
@@ -9,21 +8,22 @@ use rocket_contrib::json::Json;
 use serde_json::Value;
 use crate::util::Cached;
+use crate::error::Error;
 use crate::CONFIG;
 pub fn routes() -> Vec<Route> {
     if CONFIG.web_vault_enabled() {
-        routes![web_index, app_id, web_files, attachments, alive]
+        routes![web_index, app_id, web_files, attachments, alive, static_files]
     } else {
-        routes![attachments, alive]
+        routes![attachments, alive, static_files]
     }
 }
 #[get("/")]
-fn web_index() -> Cached<io::Result<NamedFile>> {
+fn web_index() -> Cached<Option<NamedFile>> {
     Cached::short(NamedFile::open(
         Path::new(&CONFIG.web_vault_folder()).join("index.html"),
-    ))
+    ).ok())
 }
 #[get("/app-id.json")]
@@ -46,13 +46,13 @@ fn app_id() -> Cached<Content<Json<Value>>> {
 }
 #[get("/<p..>", rank = 10)] // Only match this if the other routes don't match
-fn web_files(p: PathBuf) -> Cached<io::Result<NamedFile>> {
-    Cached::long(NamedFile::open(Path::new(&CONFIG.web_vault_folder()).join(p)))
+fn web_files(p: PathBuf) -> Cached<Option<NamedFile>> {
+    Cached::long(NamedFile::open(Path::new(&CONFIG.web_vault_folder()).join(p)).ok())
 }
 #[get("/attachments/<uuid>/<file..>")]
-fn attachments(uuid: String, file: PathBuf) -> io::Result<NamedFile> {
-    NamedFile::open(Path::new(&CONFIG.attachments_folder()).join(uuid).join(file))
+fn attachments(uuid: String, file: PathBuf) -> Option<NamedFile> {
+    NamedFile::open(Path::new(&CONFIG.attachments_folder()).join(uuid).join(file)).ok()
 }
 #[get("/alive")]
@@ -62,3 +62,18 @@ fn alive() -> Json<String> {
     Json(format_date(&Utc::now().naive_utc()))
 }
+#[get("/bwrs_static/<filename>")]
+fn static_files(filename: String) -> Result<Content<&'static [u8]>, Error> {
+    match filename.as_ref() {
+        "mail-github.png" => Ok(Content(ContentType::PNG, include_bytes!("../static/images/mail-github.png"))),
+        "logo-gray.png" => Ok(Content(ContentType::PNG, include_bytes!("../static/images/logo-gray.png"))),
+        "error-x.svg" => Ok(Content(ContentType::SVG, include_bytes!("../static/images/error-x.svg"))),
+        "bootstrap.css" => Ok(Content(ContentType::CSS, include_bytes!("../static/scripts/bootstrap.css"))),
+        "bootstrap-native-v4.js" => Ok(Content(ContentType::JavaScript, include_bytes!("../static/scripts/bootstrap-native-v4.js"))),
+        "md5.js" => Ok(Content(ContentType::JavaScript, include_bytes!("../static/scripts/md5.js"))),
+        "identicon.js" => Ok(Content(ContentType::JavaScript, include_bytes!("../static/scripts/identicon.js"))),
+        _ => err!("Image not found"),
+    }
+}

View File

@@ -21,17 +21,11 @@ lazy_static! {
     pub static ref JWT_ADMIN_ISSUER: String = format!("{}|admin", CONFIG.domain());
     static ref PRIVATE_RSA_KEY: Vec<u8> = match read_file(&CONFIG.private_rsa_key()) {
         Ok(key) => key,
-        Err(e) => panic!(
-            "Error loading private RSA Key from {}\n Error: {}",
-            CONFIG.private_rsa_key(), e
-        ),
+        Err(e) => panic!("Error loading private RSA Key.\n Error: {}", e),
     };
     static ref PUBLIC_RSA_KEY: Vec<u8> = match read_file(&CONFIG.public_rsa_key()) {
         Ok(key) => key,
-        Err(e) => panic!(
-            "Error loading public RSA Key from {}\n Error: {}",
-            CONFIG.public_rsa_key(), e
-        ),
+        Err(e) => panic!("Error loading public RSA Key.\n Error: {}", e),
     };
 }
@@ -46,7 +40,6 @@ fn decode_jwt<T: DeserializeOwned>(token: &str, issuer: String) -> Result<T, Err
     let validation = jsonwebtoken::Validation {
         leeway: 30, // 30 seconds
         validate_exp: true,
-        validate_iat: false, // IssuedAt is the same as NotBefore
         validate_nbf: true,
         aud: None,
         iss: Some(issuer),
@@ -293,7 +286,7 @@ impl<'a, 'r> FromRequest<'a, 'r> for OrgHeaders {
             device: headers.device,
             user,
             org_user_type: {
-                if let Some(org_usr_type) = UserOrgType::from_i32(org_user.type_) {
+                if let Some(org_usr_type) = UserOrgType::from_i32(org_user.atype) {
                     org_usr_type
                 } else {
                     // This should only happen if the DB is corrupted

View File

@@ -9,9 +9,14 @@ lazy_static! {
println!("Error loading config:\n\t{:?}\n", e); println!("Error loading config:\n\t{:?}\n", e);
exit(12) exit(12)
}); });
pub static ref CONFIG_FILE: String = get_env("CONFIG_FILE").unwrap_or_else(|| "data/config.json".into()); pub static ref CONFIG_FILE: String = {
let data_folder = get_env("DATA_FOLDER").unwrap_or_else(|| String::from("data"));
get_env("CONFIG_FILE").unwrap_or_else(|| format!("{}/config.json", data_folder))
};
} }
pub type Pass = String;
macro_rules! make_config { macro_rules! make_config {
($( ($(
$(#[doc = $groupdoc:literal])? $(#[doc = $groupdoc:literal])?
@@ -59,13 +64,26 @@ macro_rules! make_config {
/// Merges the values of both builders into a new builder. /// Merges the values of both builders into a new builder.
/// If both have the same element, `other` wins. /// If both have the same element, `other` wins.
fn merge(&self, other: &Self) -> Self { fn merge(&self, other: &Self, show_overrides: bool) -> Self {
let mut overrides = Vec::new();
let mut builder = self.clone(); let mut builder = self.clone();
$($( $($(
if let v @Some(_) = &other.$name { if let v @Some(_) = &other.$name {
builder.$name = v.clone(); builder.$name = v.clone();
if self.$name.is_some() {
overrides.push(stringify!($name).to_uppercase());
}
} }
)+)+ )+)+
if show_overrides && !overrides.is_empty() {
// We can't use warn! here because logging isn't setup yet.
println!("[WARNING] The following environment variables are being overriden by the config file,");
println!("[WARNING] please use the admin panel to make changes to them:");
println!("[WARNING] {}\n", overrides.join(", "));
}
builder builder
} }
@@ -114,6 +132,7 @@ macro_rules! make_config {
fn _get_form_type(rust_type: &str) -> &'static str { fn _get_form_type(rust_type: &str) -> &'static str {
match rust_type { match rust_type {
"Pass" => "password",
"String" => "text", "String" => "text",
"bool" => "checkbox", "bool" => "checkbox",
_ => "number" _ => "number"
@@ -161,7 +180,7 @@ macro_rules! make_config {
match $value { match $value {
Some(v) => v, Some(v) => v,
None => { None => {
let f: &Fn(&ConfigItems) -> _ = &$default_fn; let f: &dyn Fn(&ConfigItems) -> _ = &$default_fn;
f($config) f($config)
} }
} }
@@ -183,10 +202,9 @@ make_config! {
folders { folders {
/// Data folder |> Main data folder /// Data folder |> Main data folder
data_folder: String, false, def, "data".to_string(); data_folder: String, false, def, "data".to_string();
/// Database URL /// Database URL
database_url: String, false, auto, |c| format!("{}/{}", c.data_folder, "db.sqlite3"); database_url: String, false, auto, |c| format!("{}/{}", c.data_folder, "db.sqlite3");
/// Icon chache folder /// Icon cache folder
icon_cache_folder: String, false, auto, |c| format!("{}/{}", c.data_folder, "icon_cache"); icon_cache_folder: String, false, auto, |c| format!("{}/{}", c.data_folder, "icon_cache");
/// Attachments folder /// Attachments folder
attachments_folder: String, false, auto, |c| format!("{}/{}", c.data_folder, "attachments"); attachments_folder: String, false, auto, |c| format!("{}/{}", c.data_folder, "attachments");
@@ -208,28 +226,34 @@ make_config! {
/// General settings /// General settings
settings { settings {
/// Domain URL |> This needs to be set to the URL used to access the server, including 'http[s]://' and port, if it's different than the default. Some server functions don't work correctly without this value /// Domain URL |> This needs to be set to the URL used to access the server, including 'http[s]://'
/// and port, if it's different than the default. Some server functions don't work correctly without this value
domain: String, true, def, "http://localhost".to_string(); domain: String, true, def, "http://localhost".to_string();
/// PRIVATE |> Domain set /// Domain Set |> Indicates if the domain is set by the admin. Otherwise the default will be used.
domain_set: bool, false, def, false; domain_set: bool, false, def, false;
/// Enable web vault /// Enable web vault
web_vault_enabled: bool, false, def, true; web_vault_enabled: bool, false, def, true;
/// Disable icon downloads |> Set to true to disable icon downloading, this would still serve icons from $ICON_CACHE_FOLDER, /// HIBP Api Key |> HaveIBeenPwned API Key, request it here: https://haveibeenpwned.com/API/Key
/// but it won't produce any external network request. Needs to set $ICON_CACHE_TTL to 0, hibp_api_key: Pass, true, option;
/// Disable icon downloads |> Set to true to disable icon downloading, this would still serve icons from
/// $ICON_CACHE_FOLDER, but it won't produce any external network request. Needs to set $ICON_CACHE_TTL to 0,
/// otherwise it will delete them and they won't be downloaded again. /// otherwise it will delete them and they won't be downloaded again.
disable_icon_download: bool, true, def, false; disable_icon_download: bool, true, def, false;
/// Allow new signups |> Controls if new users can register. Note that while this is disabled, users could still be invited /// Allow new signups |> Controls if new users can register. Note that while this is disabled, users could still be invited
signups_allowed: bool, true, def, true; signups_allowed: bool, true, def, true;
/// Allow invitations |> Controls whether users can be invited by organization admins, even when signups are disabled /// Allow invitations |> Controls whether users can be invited by organization admins, even when signups are disabled
invitations_allowed: bool, true, def, true; invitations_allowed: bool, true, def, true;
/// Password iterations |> Number of server-side passwords hashing iterations. The changes only apply when a user changes their password. Not recommended to lower the value /// Password iterations |> Number of server-side passwords hashing iterations.
/// The changes only apply when a user changes their password. Not recommended to lower the value
password_iterations: i32, true, def, 100_000; password_iterations: i32, true, def, 100_000;
/// Show password hints |> Controls if the password hint should be shown directly in the web page. Otherwise, if email is disabled, there is no way to see the password hint /// Show password hints |> Controls if the password hint should be shown directly in the web page.
/// Otherwise, if email is disabled, there is no way to see the password hint
show_password_hint: bool, true, def, true; show_password_hint: bool, true, def, true;
/// Admin page token |> The token used to authenticate in this very same page. Changing it here won't deauthorize the current session /// Admin page token |> The token used to authenticate in this very same page. Changing it here won't deauthorize the current session
admin_token: String, true, option; admin_token: Pass, true, option;
}, },
/// Advanced settings /// Advanced settings
@@ -238,14 +262,44 @@ make_config! {
icon_cache_ttl: u64, true, def, 2_592_000; icon_cache_ttl: u64, true, def, 2_592_000;
/// Negative icon cache expiry |> Number of seconds before trying to download an icon that failed again. /// Negative icon cache expiry |> Number of seconds before trying to download an icon that failed again.
icon_cache_negttl: u64, true, def, 259_200; icon_cache_negttl: u64, true, def, 259_200;
/// Icon download timeout |> Number of seconds when to stop attempting to download an icon.
icon_download_timeout: u64, true, def, 10;
/// Icon blacklist Regex |> Any domains or IPs that match this regex won't be fetched by the icon service.
/// Useful to hide other servers in the local network. Check the WIKI for more details
icon_blacklist_regex: String, true, option;
/// Icon blacklist non global IPs |> Any IP which is not defined as a global IP will be blacklisted.
/// Usefull to secure your internal environment: See https://en.wikipedia.org/wiki/Reserved_IP_addresses for a list of IPs which it will block
icon_blacklist_non_global_ips: bool, true, def, true;
/// Reload templates (Dev) |> When this is set to true, the templates get reloaded with every request. ONLY use this during development, as it can slow down the server /// Disable Two-Factor remember |> Enabling this would force the users to use a second factor to login every time.
/// Note that the checkbox would still be present, but ignored.
disable_2fa_remember: bool, true, def, false;
/// Require new device emails |> When a user logs in an email is required to be sent.
/// If sending the email fails the login attempt will fail.
require_device_email: bool, true, def, false;
/// Reload templates (Dev) |> When this is set to true, the templates get reloaded with every request.
/// ONLY use this during development, as it can slow down the server
reload_templates: bool, true, def, false; reload_templates: bool, true, def, false;
/// Log routes at launch (Dev)
log_mounts: bool, true, def, false;
/// Enable extended logging /// Enable extended logging
extended_logging: bool, false, def, true; extended_logging: bool, false, def, true;
/// Enable the log to output to Syslog
use_syslog: bool, false, def, false;
/// Log file path /// Log file path
log_file: String, false, option; log_file: String, false, option;
/// Log level
log_level: String, false, def, "Info".to_string();
/// Enable DB WAL |> Turning this off might lead to worse performance, but might help if using bitwarden_rs on some exotic filesystems,
/// that do not support WAL. Please make sure you read project wiki on the topic before changing this setting.
enable_db_wal: bool, false, def, true;
/// Disable Admin Token (Know the risks!) |> Disables the Admin Token for the admin page so you may use your own auth in-front
disable_admin_token: bool, true, def, false;
}, },
/// Yubikey settings /// Yubikey settings
@@ -255,11 +309,37 @@ make_config! {
/// Client ID /// Client ID
yubico_client_id: String, true, option; yubico_client_id: String, true, option;
/// Secret Key /// Secret Key
yubico_secret_key: String, true, option; yubico_secret_key: Pass, true, option;
/// Server /// Server
yubico_server: String, true, option; yubico_server: String, true, option;
}, },
/// Global Duo settings (Note that users can override them)
duo: _enable_duo {
/// Enabled
_enable_duo: bool, true, def, false;
/// Integration Key
duo_ikey: String, true, option;
/// Secret Key
duo_skey: Pass, true, option;
/// Host
duo_host: String, true, option;
/// Application Key (generated automatically)
_duo_akey: Pass, false, option;
},
/// Email 2FA Settings
email_2fa: _enable_email_2fa {
/// Enabled |> Disabling will prevent users from setting up new email 2FA and using existing email 2FA configured
_enable_email_2fa: bool, true, auto, |c| c._enable_smtp && c.smtp_host.is_some();
/// Token number length |> Length of the numbers in an email token. Minimum of 6. Maximum is 19.
email_token_size: u32, true, def, 6;
/// Token expiration time |> Maximum time in seconds a token is valid. The time the user has to open email client and copy token.
email_expiration_time: u64, true, def, 600;
/// Maximum attempts |> Maximum attempts before an email token is reset and a new email will need to be sent
email_attempts_limit: u64, true, def, 3;
},
/// SMTP Email Settings /// SMTP Email Settings
smtp: _enable_smtp { smtp: _enable_smtp {
/// Enabled /// Enabled
@@ -268,8 +348,10 @@ make_config! {
smtp_host: String, true, option; smtp_host: String, true, option;
/// Enable SSL /// Enable SSL
smtp_ssl: bool, true, def, true; smtp_ssl: bool, true, def, true;
/// Use explicit TLS |> Enabling this would force the use of an explicit TLS connection, instead of upgrading an insecure one with STARTTLS
smtp_explicit_tls: bool, true, def, false;
/// Port /// Port
smtp_port: u16, true, auto, |c| if c.smtp_ssl {587} else {25}; smtp_port: u16, true, auto, |c| if c.smtp_explicit_tls {465} else if c.smtp_ssl {587} else {25};
/// From Address /// From Address
smtp_from: String, true, def, String::new(); smtp_from: String, true, def, String::new();
/// From Name /// From Name
@@ -277,21 +359,50 @@ make_config! {
/// Username /// Username
smtp_username: String, true, option; smtp_username: String, true, option;
/// Password /// Password
smtp_password: String, true, option; smtp_password: Pass, true, option;
/// Json form auth mechanism |> Defaults for ssl is "Plain" and "Login" and nothing for non-ssl connections. Possible values: ["Plain", "Login", "Xoauth2"]
smtp_auth_mechanism: String, true, option;
}, },
} }
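The smtp_port default above now depends on the TLS mode. A tiny sketch of the selection rule (the helper name is illustrative):

fn default_smtp_port(explicit_tls: bool, ssl: bool) -> u16 {
    // 465 for implicit TLS ("explicit TLS" in this config's wording),
    // 587 for STARTTLS-capable SSL, 25 for plain SMTP.
    if explicit_tls { 465 } else if ssl { 587 } else { 25 }
}

fn main() {
    assert_eq!(default_smtp_port(true, true), 465);
    assert_eq!(default_smtp_port(false, true), 587);
    assert_eq!(default_smtp_port(false, false), 25);
}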
 fn validate_config(cfg: &ConfigItems) -> Result<(), Error> {
+    if let Some(ref token) = cfg.admin_token {
+        if token.trim().is_empty() {
+            err!("`ADMIN_TOKEN` is enabled but has an empty value. To enable the admin page without token, use `DISABLE_ADMIN_TOKEN`")
+        }
+    }
+    if cfg._enable_duo
+        && (cfg.duo_host.is_some() || cfg.duo_ikey.is_some() || cfg.duo_skey.is_some())
+        && !(cfg.duo_host.is_some() && cfg.duo_ikey.is_some() && cfg.duo_skey.is_some())
+    {
+        err!("All Duo options need to be set for global Duo support")
+    }
-    if cfg.yubico_client_id.is_some() != cfg.yubico_secret_key.is_some() {
+    if cfg._enable_yubico && cfg.yubico_client_id.is_some() != cfg.yubico_secret_key.is_some() {
         err!("Both `YUBICO_CLIENT_ID` and `YUBICO_SECRET_KEY` need to be set for Yubikey OTP support")
     }
-    if cfg.smtp_host.is_some() == cfg.smtp_from.is_empty() {
-        err!("Both `SMTP_HOST` and `SMTP_FROM` need to be set for email support")
-    }
-    if cfg.smtp_username.is_some() != cfg.smtp_password.is_some() {
-        err!("Both `SMTP_USERNAME` and `SMTP_PASSWORD` need to be set to enable email authentication")
+    if cfg._enable_smtp {
+        if cfg.smtp_host.is_some() == cfg.smtp_from.is_empty() {
+            err!("Both `SMTP_HOST` and `SMTP_FROM` need to be set for email support")
+        }
+        if cfg.smtp_username.is_some() != cfg.smtp_password.is_some() {
+            err!("Both `SMTP_USERNAME` and `SMTP_PASSWORD` need to be set to enable email authentication")
+        }
+        if cfg._enable_email_2fa && (!cfg._enable_smtp || cfg.smtp_host.is_none()) {
+            err!("To enable email 2FA, SMTP must be configured")
+        }
+        if cfg._enable_email_2fa && cfg.email_token_size < 6 {
+            err!("`EMAIL_TOKEN_SIZE` has a minimum size of 6")
+        }
+        if cfg._enable_email_2fa && cfg.email_token_size > 19 {
+            err!("`EMAIL_TOKEN_SIZE` has a maximum size of 19")
+        }
     }
     Ok(())
@@ -304,7 +415,7 @@ impl Config {
     let _usr = ConfigBuilder::from_file(&CONFIG_FILE).unwrap_or_default();
     // Create merged config, config file overwrites env
-    let builder = _env.merge(&_usr);
+    let builder = _env.merge(&_usr, true);
     // Fill any missing with defaults
     let config = builder.build();
@@ -333,7 +444,7 @@ impl Config {
     // Prepare the combined config
     let config = {
         let env = &self.inner.read().unwrap()._env;
-        env.merge(&builder).build()
+        env.merge(&builder, false).build()
     };
     validate_config(&config)?;
@@ -352,6 +463,14 @@ impl Config {
         Ok(())
     }
+    pub fn update_config_partial(&self, other: ConfigBuilder) -> Result<(), Error> {
+        let builder = {
+            let usr = &self.inner.read().unwrap()._usr;
+            usr.merge(&other, false)
+        };
+        self.update_config(builder)
+    }
     pub fn delete_user_config(&self) -> Result<(), Error> {
         crate::util::delete_file(&CONFIG_FILE)?;
@@ -387,9 +506,21 @@ impl Config {
         let inner = &self.inner.read().unwrap().config;
         inner._enable_smtp && inner.smtp_host.is_some()
     }
-    pub fn yubico_enabled(&self) -> bool {
-        let inner = &self.inner.read().unwrap().config;
-        inner._enable_yubico && inner.yubico_client_id.is_some() && inner.yubico_secret_key.is_some()
+    pub fn get_duo_akey(&self) -> String {
+        if let Some(akey) = self._duo_akey() {
+            akey
+        } else {
+            let akey = crate::crypto::get_random_64();
+            let akey_s = data_encoding::BASE64.encode(&akey);
+            // Save the new value
+            let mut builder = ConfigBuilder::default();
+            builder._duo_akey = Some(akey_s.clone());
+            self.update_config_partial(builder).ok();
+            akey_s
+        }
     }
     pub fn render_template<T: serde::ser::Serialize>(
@@ -416,21 +547,29 @@ fn load_templates(path: &str) -> Handlebars {
     let mut hb = Handlebars::new();
     // Error on missing params
     hb.set_strict_mode(true);
+    // Register helpers
     hb.register_helper("case", Box::new(CaseHelper));
+    hb.register_helper("jsesc", Box::new(JsEscapeHelper));
     macro_rules! reg {
         ($name:expr) => {{
             let template = include_str!(concat!("static/templates/", $name, ".hbs"));
             hb.register_template_string($name, template).unwrap();
         }};
+        ($name:expr, $ext:expr) => {{
+            reg!($name);
+            reg!(concat!($name, $ext));
+        }};
     }
     // First register default templates here
-    reg!("email/invite_accepted");
-    reg!("email/invite_confirmed");
-    reg!("email/pw_hint_none");
-    reg!("email/pw_hint_some");
-    reg!("email/send_org_invite");
+    reg!("email/invite_accepted", ".html");
+    reg!("email/invite_confirmed", ".html");
+    reg!("email/new_device_logged_in", ".html");
+    reg!("email/pw_hint_none", ".html");
+    reg!("email/pw_hint_some", ".html");
+    reg!("email/send_org_invite", ".html");
+    reg!("email/twofactor_email", ".html");
     reg!("admin/base");
     reg!("admin/login");
@@ -444,7 +583,6 @@ fn load_templates(path: &str) -> Handlebars {
     hb
 }
-#[derive(Clone, Copy)]
 pub struct CaseHelper;
 impl HelperDef for CaseHelper {
@@ -454,7 +592,7 @@ impl HelperDef for CaseHelper {
         r: &'reg Handlebars,
         ctx: &Context,
         rc: &mut RenderContext<'reg>,
-        out: &mut Output,
+        out: &mut dyn Output,
     ) -> HelperResult {
         let param = h
             .param(0)
@@ -468,3 +606,31 @@ impl HelperDef for CaseHelper {
         }
     }
 }
+pub struct JsEscapeHelper;
+impl HelperDef for JsEscapeHelper {
+    fn call<'reg: 'rc, 'rc>(
+        &self,
+        h: &Helper<'reg, 'rc>,
+        _: &'reg Handlebars,
+        _: &Context,
+        _: &mut RenderContext<'reg>,
+        out: &mut dyn Output,
+    ) -> HelperResult {
+        let param = h
+            .param(0)
+            .ok_or_else(|| RenderError::new("Param not found for helper \"js_escape\""))?;
+        let value = param
+            .value()
+            .as_str()
+            .ok_or_else(|| RenderError::new("Param for helper \"js_escape\" is not a String"))?;
+        let escaped_value = value.replace('\\', "").replace('\'', "\\x22").replace('\"', "\\x27");
+        let quoted_value = format!("&quot;{}&quot;", escaped_value);
+        out.write(&quoted_value)?;
+        Ok(())
+    }
+}

View File

@@ -2,7 +2,8 @@
 // PBKDF2 derivation
 //
-use ring::{digest, pbkdf2};
+use ring::{digest, hmac, pbkdf2};
+use std::num::NonZeroU32;
 static DIGEST_ALG: &digest::Algorithm = &digest::SHA256;
 const OUTPUT_LEN: usize = digest::SHA256_OUTPUT_LEN;
@@ -10,15 +11,29 @@ const OUTPUT_LEN: usize = digest::SHA256_OUTPUT_LEN;
 pub fn hash_password(secret: &[u8], salt: &[u8], iterations: u32) -> Vec<u8> {
     let mut out = vec![0u8; OUTPUT_LEN]; // Initialize array with zeros
+    let iterations = NonZeroU32::new(iterations).expect("Iterations can't be zero");
     pbkdf2::derive(DIGEST_ALG, iterations, salt, secret, &mut out);
     out
 }
 pub fn verify_password_hash(secret: &[u8], salt: &[u8], previous: &[u8], iterations: u32) -> bool {
+    let iterations = NonZeroU32::new(iterations).expect("Iterations can't be zero");
     pbkdf2::verify(DIGEST_ALG, iterations, salt, secret, previous).is_ok()
 }
+//
+// HMAC
+//
+pub fn hmac_sign(key: &str, data: &str) -> String {
+    use data_encoding::HEXLOWER;
+    let key = hmac::SigningKey::new(&digest::SHA1, key.as_bytes());
+    let signature = hmac::sign(&key, data.as_bytes());
+    HEXLOWER.encode(signature.as_ref())
+}
 //
 // Random values
 //
@@ -36,3 +51,12 @@ pub fn get_random(mut array: Vec<u8>) -> Vec<u8> {
     array
 }
+//
+// Constant time compare
+//
+pub fn ct_eq<T: AsRef<[u8]>, U: AsRef<[u8]>>(a: T, b: U) -> bool {
+    use ring::constant_time::verify_slices_are_equal;
+    verify_slices_are_equal(a.as_ref(), b.as_ref()).is_ok()
+}
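ct_eq above delegates to ring's verify_slices_are_equal so that comparing a stored 2FA remember token against user input takes time independent of how many leading bytes match. A dependency-free sketch of the same idea (illustrative only; ring's audited primitive is what the code actually uses):

fn ct_eq(a: &[u8], b: &[u8]) -> bool {
    if a.len() != b.len() {
        return false;
    }
    let mut diff = 0u8;
    for (x, y) in a.iter().zip(b) {
        diff |= x ^ y; // accumulate differences instead of returning early
    }
    diff == 0
}

fn main() {
    assert!(ct_eq(b"remember-token", b"remember-token"));
    assert!(!ct_eq(b"remember-token", b"remember-tokeX"));
}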

View File

@@ -2,25 +2,41 @@ use std::ops::Deref;
use diesel::r2d2;
use diesel::r2d2::ConnectionManager;
-use diesel::sqlite::SqliteConnection;
use diesel::{Connection as DieselConnection, ConnectionError};

use rocket::http::Status;
use rocket::request::{self, FromRequest};
use rocket::{Outcome, Request, State};

use crate::error::Error;
use chrono::prelude::*;
use std::process::Command;

use crate::CONFIG;

/// An alias to the database connection used
-type Connection = SqliteConnection;
+#[cfg(feature = "sqlite")]
type Connection = diesel::sqlite::SqliteConnection;
#[cfg(feature = "mysql")]
type Connection = diesel::mysql::MysqlConnection;
#[cfg(feature = "postgresql")]
type Connection = diesel::pg::PgConnection;

-/// An alias to the type for a pool of Diesel SQLite connections.
+/// An alias to the type for a pool of Diesel connections.
type Pool = r2d2::Pool<ConnectionManager<Connection>>;

/// Connection request guard type: a wrapper around an r2d2 pooled connection.
pub struct DbConn(pub r2d2::PooledConnection<ConnectionManager<Connection>>);

pub mod models;

#[cfg(feature = "sqlite")]
#[path = "schemas/sqlite/schema.rs"]
pub mod schema;
#[cfg(feature = "mysql")]
#[path = "schemas/mysql/schema.rs"]
pub mod schema;
#[cfg(feature = "postgresql")]
#[path = "schemas/postgresql/schema.rs"]
pub mod schema;

/// Initializes a database pool.

@@ -34,6 +50,21 @@ pub fn get_connection() -> Result<Connection, ConnectionError> {
    Connection::establish(&CONFIG.database_url())
}

/// Creates a back-up of the database using sqlite3
pub fn backup_database() -> Result<(), Error> {
    let now: DateTime<Utc> = Utc::now();
    let file_date = now.format("%Y%m%d").to_string();
    let backup_command: String = format!("{}{}{}", ".backup 'db_", file_date, ".sqlite3'");

    Command::new("sqlite3")
        .current_dir("./data")
        .args(&["db.sqlite3", &backup_command])
        .output()
        .expect("Can't open database, sqlite3 is not available, make sure it's installed and available on the PATH");

    Ok(())
}

/// Attempts to retrieve a single connection from the managed database pool. If
/// no pool is currently managed, fails with an `InternalServerError` status. If
/// no connections are available, fails with a `ServiceUnavailable` status.
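
Two notes on this file: the `Connection` alias is now chosen at compile time by mutually exclusive Cargo features (sqlite, mysql, postgresql), and backups shell out to the sqlite3 CLI's `.backup` dot-command rather than going through Diesel. A hypothetical call site (the actual caller is not shown in this diff):

    // Writes ./data/db_YYYYMMDD.sqlite3 alongside the live db.sqlite3;
    // panics if the sqlite3 binary is not available on the PATH.
    crate::db::backup_database().expect("backup failed");
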

View File

@@ -3,7 +3,7 @@ use serde_json::Value;
use super::Cipher;
use crate::CONFIG;

-#[derive(Debug, Identifiable, Queryable, Insertable, Associations)]
+#[derive(Debug, Identifiable, Queryable, Insertable, Associations, AsChangeset)]
#[table_name = "attachments"]
#[belongs_to(Cipher, foreign_key = "cipher_uuid")]
#[primary_key(id)]

@@ -12,7 +12,7 @@ pub struct Attachment {
    pub cipher_uuid: String,
    pub file_name: String,
    pub file_size: i32,
-   pub key: Option<String>,
+   pub akey: Option<String>,
}

/// Local methods

@@ -23,7 +23,7 @@ impl Attachment {
            cipher_uuid,
            file_name,
            file_size,
-           key: None,
+           akey: None,
        }
    }

@@ -43,7 +43,7 @@ impl Attachment {
            "FileName": self.file_name,
            "Size": self.file_size.to_string(),
            "SizeName": display_size,
-           "Key": self.key,
+           "Key": self.akey,
            "Object": "attachment"
        })
    }

@@ -59,6 +59,18 @@ use crate::error::MapResult;
/// Database methods
impl Attachment {
    #[cfg(feature = "postgresql")]
    pub fn save(&self, conn: &DbConn) -> EmptyResult {
        diesel::insert_into(attachments::table)
            .values(self)
            .on_conflict(attachments::id)
            .do_update()
            .set(self)
            .execute(&**conn)
            .map_res("Error saving attachment")
    }

    #[cfg(not(feature = "postgresql"))]
    pub fn save(&self, conn: &DbConn) -> EmptyResult {
        diesel::replace_into(attachments::table)
            .values(self)
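
The branch above exists because Diesel's `replace_into` compiles to REPLACE INTO, which SQLite and MySQL support but PostgreSQL does not; the PostgreSQL build expresses the same upsert as INSERT ... ON CONFLICT (id) DO UPDATE. The `key` -> `akey` rename presumably sidesteps `key` being a reserved word on some of the newly supported backends. A minimal self-contained sketch of the upsert pattern against an assumed `items` table (names hypothetical, not from this diff):

    #[macro_use]
    extern crate diesel;

    use diesel::pg::PgConnection;
    use diesel::prelude::*;

    table! {
        items (id) {
            id -> Integer,
            name -> Text,
        }
    }

    // Insert a row; if the primary key already exists, update it in place.
    fn upsert_item(conn: &PgConnection, item_id: i32, item_name: &str) -> QueryResult<usize> {
        diesel::insert_into(items::table)
            .values((items::id.eq(item_id), items::name.eq(item_name)))
            .on_conflict(items::id)
            .do_update()
            .set(items::name.eq(item_name))
            .execute(conn)
    }
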

Some files were not shown because too many files have changed in this diff.