Compare commits

...

279 Commits

Author SHA1 Message Date
Daniel García
cbadf00941 Update web vault to fix twofactorauth.org integration
Update dependencies and toolchain
Update included equivalent domains with upstream changes
2019-11-19 20:30:09 +01:00
Daniel García
c5b7447dac Merge pull request #728 from tomuta/signups_domains_whitelist
Add the ability to disable signups, but allow signups from a whitelist
2019-11-16 23:19:20 +01:00
tomuta
64d6f72e6c Add the ability to disable signups, but allow signups from a whitelist
This feature can be enabled by setting SIGNUPS_ALLOWED=false and
providing a comma-separated list of whitelisted domains in
SIGNUPS_DOMAINS_WHITELIST.

Fixes #727
2019-11-16 15:01:45 -07:00
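For reference, a minimal `.env` sketch of the new option (domains are placeholders; the variable names come from this changeset):

```sh
# Disable open registration, but keep allowing signups from these domains
SIGNUPS_ALLOWED=false
SIGNUPS_DOMAINS_WHITELIST=example.com,example.net,example.org
```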
Daniel García
a19a6fb016 Merge pull request #725 from ntimo/task/add-tvapplecom-globaldomains
Added tv.apple.com to global domains
2019-11-15 00:30:56 +01:00
Timo N
b889e5185e Added tv.apple.com to global domains 2019-11-14 23:10:55 +01:00
Daniel García
cd83a9e7b2 Merge pull request #720 from gnu300/master
cleaner startup exec in order to build the image and run the container with podman/libpod
2019-11-13 22:45:06 +01:00
Gernot Nusshall
748c825202 cleaner startup exec in order to build the image and run the container with podman/libpod 2019-11-13 22:11:09 +01:00
Gernot Nusshall
204993568a cleaner startup exec in order to build the image and run the container with podman/libpod 2019-11-13 21:45:26 +01:00
Gernot Nusshall
70be2d93ce cleaner startup exec in order to build the image and run the container with podman/libpod 2019-11-13 13:45:05 +01:00
Daniel García
f5638716d2 Merge pull request #716 from ThomDietrich/patch-1
Add vim modeline for GitHub Linguist
2019-11-11 18:41:21 +01:00
Thomas Dietrich
fbc2fad9c9 Add vim modeline for GitHub Linguist 2019-11-11 11:19:58 +01:00
Daniel García
3f39e35123 Merge pull request #713 from BlackDex/issue-705
Fixed issue/request #705
2019-11-07 20:28:49 +01:00
BlackDex
3f6809bcdf Fixed issue/request #705
Added a config option to disable time-drifted TOTP codes.
Default is false, since this is what the RFC recommends.
2019-11-07 17:11:29 +01:00
Daniel García
9ff577a7b4 Merge pull request #711 from BlackDex/issue-706
Added configurable smtp timeout.
2019-11-06 21:54:30 +01:00
BlackDex
c52adef919 Added configurable smtp timeout.
- Added a config option for the SMTP timeout
- Lowered the default timeout to 15 seconds instead of the previous 60.
2019-11-06 21:39:33 +01:00
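As a sketch, overriding the new default in `.env` (the variable name comes from this changeset; 30 is an arbitrary example value):

```sh
# SMTP connection timeout in seconds; defaults to 15 after this change
SMTP_TIMEOUT=30
```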
BlackDex
cbb92bcbc0 Updated dependencies
Updated some dependencies and used a git patch for lettre addressing
timeouts.
2019-11-06 21:37:51 +01:00
Daniel García
948798a84f Merge pull request #710 from BlackDex/issue-709
Fixed issue #709 creating icon_cache directory.
2019-11-06 21:35:04 +01:00
BlackDex
2ffc3eac4d Clippy fix 2019-11-06 20:34:52 +01:00
BlackDex
0ff7fd939e Next attempt for issue #709 fix
Now creates the icon cache directory at startup,
and also recreates the directory if it goes missing during runtime.
Also merged icon_save/mark_negcache into one.
2019-11-06 20:21:47 +01:00
BlackDex
ca7c5129b2 Fixed issue #709 creating icon_cache directory.
When the icon_cache directory didn't exist yet and the first icon
fetched was a miss, the .miss file could not be created, since the
directory was only created during a valid icon download.
2019-11-06 15:47:56 +01:00
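A minimal sketch of the fix described above, assuming a hypothetical `ensure_icon_cache` helper and an illustrative cache path (not the project's exact code):

```rust
use std::fs;
use std::path::Path;

// Ensure the icon cache directory exists before writing an icon or a
// .miss marker file; recreate it if it went missing during runtime.
fn ensure_icon_cache(path: &str) -> std::io::Result<()> {
    if !Path::new(path).is_dir() {
        fs::create_dir_all(path)?; // also creates missing parent directories
    }
    Ok(())
}

fn main() -> std::io::Result<()> {
    ensure_icon_cache("data/icon_cache") // path is illustrative
}
```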
Daniel García
07e0fdbd2a Merge pull request #704 from patrickli/bugfix/dockerfiles
Don't install mysql libraries for sqlite builds
2019-11-05 18:48:06 +01:00
Daniel García
b4dfc24040 Merge pull request #703 from patrickli/bugfix/dont-sync-excluded-global-domains
Don't include excluded global equivalent domains during sync
2019-11-05 18:47:43 +01:00
Patrick Li
85dbf4e16c Don't include excluded global equivalent domains during sync
Fixes #681
2019-11-05 21:29:04 +13:00
Patrick Li
efc65b93f8 Don't install mysql libraries for sqlite builds 2019-11-05 16:08:41 +13:00
Daniel García
9a0fe6f617 Merge pull request #701 from BlackDex/issue-687
Trying to fix issue #687
2019-11-04 14:44:52 +01:00
BlackDex
3442eb1b9d Trying to fix issue #687
- Using an older commit from rocket repo
2019-11-04 14:30:24 +01:00
Daniel García
e449912f05 Generate recovery codes for email and duo 2019-11-02 18:31:50 +01:00
Daniel García
72a46fb386 Update dependencies 2019-11-02 17:39:27 +01:00
Daniel García
d29b6bee28 Remove unnecessary clones and other clippy fixes 2019-11-02 17:39:01 +01:00
Daniel García
e2e3712921 Merge pull request #695 from mprasil/do-not-leak-usernames
Stop leaking usernames when SIGNUPS_ALLOWED=false
2019-11-02 00:12:53 +01:00
Miro Prasil
00a11b1b78 Stop leaking usernames when SIGNUPS_ALLOWED=false
This fixes #691 - respond in a less specific way so as not to leak
the fact that a user is already registered on the server.
2019-11-01 22:34:42 +00:00
Daniel García
77b78f0991 Merge pull request #690 from BlackDex/icon-download-http
Added http favicon url when response failed
2019-10-29 15:02:59 +01:00
BlackDex
ee550be80c Added http favicon url when response failed 2019-10-29 14:24:01 +01:00
Daniel García
97d41c2686 Revert rustup minimal profile, rustup can't be updated 2019-10-26 00:55:58 +02:00
Daniel García
fccc0a4b05 Update rocket to latest master
Downgrade rust version to fix cargo issue
Set rustup profile to minimal
2019-10-25 21:48:10 +02:00
Daniel García
57b1d3f850 Update dependencies and docker base images 2019-10-24 20:37:17 +02:00
Daniel García
77d40833d9 Merge pull request #679 from mprasil/bump-rust-toolchain
Bump rust toolchain
2019-10-22 19:18:43 +02:00
Miro Prasil
7814218208 Bump rust toolchain
This is as per #622, which should resolve issues building on armv7.
2019-10-22 16:31:36 +01:00
Daniel García
95a7ffdf6b Merge pull request #673 from Jellyfrog/patch-2
Remove unneeded WS logging
2019-10-17 20:21:47 +02:00
Jellyfrog
ebc47dc161 Remove unneeded WS logging 2019-10-17 17:15:11 +02:00
Daniel García
cd8acc2e8c Merge pull request #671 from vverst/enable-2fa-email
Move 2FA email config to after SMTP config
2019-10-16 19:55:54 +02:00
vpl
3b7a5bd102 Move 2FA email config to after SMTP config 2019-10-16 07:11:16 +02:00
Daniel García
d3054d4f83 Merge pull request #667 from dani-garcia/minimal_profile
Update rust version and use minimal profile for CI
2019-10-15 22:26:12 +02:00
Daniel García
5ac66b05e3 Merge pull request #666 from vverst/fix-2fa-email
Fix 2FA email not sending
2019-10-15 22:25:37 +02:00
Daniel García
83fd44eeef Update rust version and use minimal profile for CI 2019-10-15 21:21:37 +02:00
vpl
2edecf34ff Use user_uuid instead of mut twofactor 2019-10-15 21:20:19 +02:00
vpl
18bc8331f9 Send email when preparing 2FA JsonError 2019-10-15 21:19:49 +02:00
Daniel García
7d956c5117 Merge pull request #664 from BlackDex/fix-issue-663
Fixed issue #663.
2019-10-14 01:25:26 +02:00
BlackDex
603a964579 Fixed issue #663.
During 2FA activation there is no twofactor record yet.
Changed the layout a bit so that it generates a new twofactor record
when one does not exist yet; otherwise it just updates the already
existing record.
2019-10-14 00:32:44 +02:00
Daniel García
dc515b83f3 Merge pull request #657 from BlackDex/totp-timedrift
Updated authenticator TOTP
2019-10-12 16:33:43 +02:00
BlackDex
9466f02696 Recoded TOTP time drift validation 2019-10-12 15:28:28 +02:00
Daniel García
d3bd2774dc Update dependencies to use newer SQLite 2019-10-11 22:49:47 +02:00
Daniel García
f482585d7c Merge pull request #660 from BlackDex/sqlite-backup-fix
Fixed a bug with the sqlite backup feature.
2019-10-11 15:07:21 +02:00
BlackDex
2cde814aaa Fixed a bug with the sqlite backup feature.
When a custom path was used, the backup feature did not work.
Changed it so it takes the path of the sqlite file and uses that.
2019-10-11 12:08:40 +02:00
BlackDex
d989a19f76 Merge branch 'master' of https://github.com/dani-garcia/bitwarden_rs into totp-timedrift 2019-10-11 11:22:13 +02:00
Daniel García
d292269ea0 Make the blacklist logic be cached 2019-10-10 23:21:22 +02:00
BlackDex
ebf40099f2 Updated authenticator TOTP
- Added a security check for previously used codes
- Allow TOTP codes one step back and one forward when there is time
drift. This means up to 3 codes could be valid. But only codes newer
than the previously used code are accepted after that.
2019-10-10 17:32:20 +02:00
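A sketch of the validation window described here, under stated assumptions: `generate_totp` stands in for a TOTP library call, and `last_used_step` is the highest 30-second time step already consumed for this user:

```rust
// Accept the current 30-second step plus one step back and one forward,
// but only steps newer than the last one already used (replay protection).
fn validate_totp(
    input: &str,
    current_step: u64,                     // e.g. unix_time / 30
    last_used_step: u64,                   // highest step already consumed
    generate_totp: impl Fn(u64) -> String, // hypothetical TOTP generator
) -> Option<u64> {
    let window = [current_step.saturating_sub(1), current_step, current_step + 1];
    for step in window {
        // A real implementation should compare codes in constant time.
        if step > last_used_step && generate_totp(step) == input {
            return Some(step); // caller persists this as the new last_used_step
        }
    }
    None // wrong code, or a replay of an already-used/older step
}

fn main() {
    let toy = |step: u64| format!("{:06}", step % 1_000_000); // toy generator
    assert_eq!(validate_totp(&toy(100), 100, 99, toy), Some(100));
    assert_eq!(validate_totp(&toy(99), 100, 99, toy), None); // replay rejected
}
```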
Daniel García
0586c00285 Merge pull request #653 from stevesbrain/master
Simple grammar update
2019-10-10 01:06:51 +02:00
Steve Divskinsy
bb9ddd5680 Merge pull request #1 from stevesbrain/stevesbrain-patch-1
Very simple grammar updates
2019-10-09 22:23:20 +10:30
Steve Divskinsy
cb1663fc12 Very simple grammar updates
Just some basic grammar updates in the "get in touch" section.
2019-10-09 22:22:52 +10:30
Daniel García
45d9d8db94 Merge pull request #652 from BlackDex/hibp-changes
Some modifications when no HIBP API key is set
2019-10-09 00:44:00 +02:00
BlackDex
edc482c8ea Changed HIBP error message.
- Moved the link for the manual check to the top.
- Clarified that HIBP is a paid service.
- Changed the error logo to the HIBP logo.
2019-10-08 22:29:12 +02:00
BlackDex
6e5c03cc78 Some modifications when no HIBP API key is set
- Added a URL with the user account for a manual check.
- Added support for HTTP(S)_PROXY for HIBP.
2019-10-08 21:39:11 +02:00
Daniel García
881c1978eb Error when the URL scheme doesn't match the database type 2019-10-08 19:34:47 +02:00
Daniel García
662bc27523 Updated dependencies and fixed disable_admin_token description 2019-10-08 19:33:27 +02:00
Daniel García
b4b62c22a4 Merge pull request #648 from BlackDex/icon-security
Added missing .env configuration option.
2019-10-08 18:08:32 +02:00
BlackDex
05569147af Added missing .env configuration option. 2019-10-08 13:30:17 +02:00
Daniel García
99a635d327 Merge pull request #643 from BlackDex/icon-security
Updated icon blacklisting.
2019-10-05 17:06:14 +02:00
Daniel García
e6b763026e Merge branch 'master' into icon-security 2019-10-05 16:45:36 +02:00
Daniel García
c182583e09 Merge pull request #644 from BlackDex/issue-565
Fixed issue #565
2019-10-05 16:17:43 +02:00
Daniel García
d821389c2e Merge pull request #639 from vverst/cors-update
Change CORS headers
2019-10-05 16:09:33 +02:00
BlackDex
be2916333b Fixed issue #565
Issue fixed by omitting the cookie header when cookie_str is empty
2019-10-05 15:45:09 +02:00
BlackDex
9124d8a3fb Updated icon blacklisting.
- Blacklisting was not effective for redirects and rel href
- Added the ability to blacklist non-global IPs like RFC 1918 ranges, multicast, etc.
2019-10-05 14:48:15 +02:00
vpl
7b1da527a6 Change CORS headers
Only add Allow-Origin to all requests, and move the other headers to the preflight OPTIONS request.
If the Origin is `file://`, change it to the wildcard.
2019-10-01 20:12:33 +02:00
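Illustratively, the resulting exchange looks roughly like this (path and header values are examples, not the exact ones the server emits); non-preflight responses then carry only the Allow-Origin header:

```
OPTIONS /api/ciphers HTTP/1.1
Origin: file://
Access-Control-Request-Method: PUT

HTTP/1.1 200 OK
Access-Control-Allow-Origin: *
Access-Control-Allow-Methods: GET, POST, PUT, DELETE, OPTIONS
```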
Daniel García
e7b8602e1f Merge pull request #638 from mprasil/add_sqlite_binary
Add sqlite binary into the docker images
2019-10-01 19:50:41 +02:00
Miro Prasil
d6e9af909b Remove the unnecessary check for sqlite
The binary we use is called `sqlite3`, so there is no need to check for
other name variants, as we won't use those anyway.
2019-10-01 10:40:22 +01:00
Miro Prasil
acdd42935b Add sqlite binary into the docker images
This is done to enable backup functionality in the admin interface while
we're waiting for libsqlite3-sys 0.17 to bubble up in the upstream
dependencies. Then we can start using `VACUUM INTO`.

This also extends the check for the sqlite binary to also try `sqlite3`,
as this is the name of the binary in the base-image distributions we use.
2019-09-30 13:54:06 +01:00
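A hedged sketch of what this enables, with illustrative paths; `.backup` is the sqlite3 CLI's built-in online-backup command, and `VACUUM INTO` is the SQL equivalent available from SQLite 3.27:

```sh
# Online backup via the bundled sqlite3 binary (paths are illustrative)
sqlite3 /data/db.sqlite3 ".backup '/data/db_backup.sqlite3'"

# Once libsqlite3-sys 0.17 lands, the same can be done in SQL instead:
# sqlite3 /data/db.sqlite3 "VACUUM INTO '/data/db_backup.sqlite3'"
```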
Daniel García
8367d1d715 Merge pull request #631 from vverst/cors-put
Use Access-Control-Allow-Method
2019-09-23 20:03:51 +02:00
vpl
56f12dc982 Use Access-Control-Allow-Method 2019-09-23 07:44:44 +02:00
Daniel García
4c07f05b3a Remove Result<T, E: Debug> in preparation of deprecation as Rocket responder.
Removed unnecessary returns
2019-09-17 21:05:56 +02:00
Daniel García
b73ff886c3 Use upstream rmp 2019-09-17 19:47:51 +02:00
Daniel García
2e7bd62353 Merge pull request #624 from swedishborgie/postgresql
Fix issue with downloading attachments and PostgreSQL backend.
2019-09-17 18:50:40 +02:00
Michael Powers
1264eb640a Added a migration that fixes #1 which caused attachments to be broken
for the PostgreSQL backend. Also converts any CHAR types to VARCHAR to prevent the same issue from causing problems down the line.
2019-09-16 19:52:00 -04:00
Daniel García
3a90364b32 Merge pull request #621 from swedishborgie/postgresql
Adds support for PostgreSQL, which resolves #87 and is mentioned in #246.
2019-09-16 20:05:05 +02:00
Michael Powers
f5f9861a78 Adds support for PostgreSQL which resolves #87 and is mentioned in #246.
This includes migrations as well as Dockerfile's for amd64.

The biggest change is that replace_into isn't supported by Diesel for the
PostgreSQL backend, instead requiring the use of on_conflict. This
unfortunately requires a branch for save() on all of the models currently
using replace_into.
2019-09-12 16:12:22 -04:00
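A hedged Rust sketch of that branch, using a hypothetical `users` table and model rather than the project's actual schema; `replace_into` maps to `REPLACE INTO` on SQLite/MySQL, while PostgreSQL needs `INSERT ... ON CONFLICT`:

```rust
#[macro_use]
extern crate diesel;
use diesel::prelude::*;

table! {
    users (uuid) {
        uuid -> Text,
        email -> Text,
    }
}

#[derive(Insertable, AsChangeset)]
#[table_name = "users"]
struct UserDb {
    uuid: String,
    email: String,
}

// SQLite/MySQL: Diesel supports REPLACE INTO directly.
#[cfg(not(feature = "postgresql"))]
fn save(user: &UserDb, conn: &SqliteConnection) -> QueryResult<usize> {
    diesel::replace_into(users::table).values(user).execute(conn)
}

// PostgreSQL: emulate the upsert with ON CONFLICT ... DO UPDATE.
#[cfg(feature = "postgresql")]
fn save(user: &UserDb, conn: &PgConnection) -> QueryResult<usize> {
    diesel::insert_into(users::table)
        .values(user)
        .on_conflict(users::uuid)
        .do_update()
        .set(user)
        .execute(conn)
}

fn main() {}
```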
Daniel García
f9408a00c6 Allow self signed certs and increase a bit the timings 2019-09-11 22:01:42 +02:00
Daniel García
ae8bf954c1 Updated web vault to 2.12 2019-09-07 22:13:56 +02:00
Daniel García
c656f2f694 Merge pull request #604 from mprasil/fix-healthcheck
Fix #603 and  remove mysql from sqlite image
2019-09-06 10:47:10 +02:00
Miro Prasil
eea3f13bb3 Fix #603 and remove mysql from sqlite image
This changes the healthcheck to use `sh` instead of bash, which is absent
from some image versions (like Alpine).

It also removes `*mariadb*` packages from the runtime image of the sqlite
images, as these shouldn't be required.
Daniel García
df8114f8be Updated client kdf iterations to 100000 and fixed some lints 2019-09-05 21:56:12 +02:00
Daniel García
dda244edd8 Merge pull request #589 from H3npi/H3npi-patch-1
Adds Healthcheck for default docker container
2019-09-05 19:47:10 +02:00
H3npi
cce3ce816c Adds environment port to curl healthcheck 2019-09-04 09:12:53 +02:00
Daniel García
65c0d1064b Merge pull request #599 from vverst/cors
Add Cors headers
2019-09-03 20:22:54 +02:00
vpl
5a2f968d7a Set correct response headers, status code 2019-09-02 21:13:12 +02:00
vpl
16d88402cb Initial version of CORS support 2019-09-01 13:00:12 +02:00
Daniel García
7dcf18151d Fix onsubmit 2019-08-31 17:57:47 +02:00
Daniel García
e3404dd322 Use the local scripts instead of cloudflare, remove jquery and update config so disabling a master toggle doesn't remove the values 2019-08-31 17:47:52 +02:00
Daniel García
bfc517ee80 Remove unused warning 2019-08-31 17:26:16 +02:00
Daniel García
4a7d2a1e28 Rename static files endpoint 2019-08-31 17:25:31 +02:00
H3npi
66a68f6d22 Adds Healthcheck for all docker container 2019-08-29 09:02:02 +02:00
Daniel García
469318bcbd Updated dependencies and web vault version 2019-08-27 21:14:15 +02:00
Daniel García
c07c9995ea Merge pull request #555 from vverst/email-codes
Add Email 2FA login
2019-08-27 21:07:41 +02:00
Daniel García
2c2276c5bb Merge pull request #585 from ViViDboarder/mail-auth-over-insecure
Allow explicitly defined SMTP auth mechanism
2019-08-27 20:21:23 +02:00
ViViDboarder
672a245548 Remove unnecessary clone 2019-08-27 10:40:38 -07:00
vpl
5d50b1ee3c Merge remote-tracking branch 'upstream/master' into email-codes 2019-08-26 21:38:45 +02:00
vpl
c99df1c310 Compare token using crypto::ct_eq 2019-08-26 20:26:59 +02:00
vpl
591ae10144 Get token from single u64 2019-08-26 20:26:54 +02:00
ViViDboarder
2d2745195e Allow explicitly defined SMTP auth mechanism 2019-08-23 16:22:14 -07:00
Daniel García
026f9da035 Allow removing users two factors 2019-08-21 17:13:06 +02:00
Daniel García
d23d4f2c1d Allow editing HIBP key in the admin panel 2019-08-20 23:53:00 +02:00
Daniel García
515b87755a Update HIBP to v3, requires paid API key, fixes #583 2019-08-20 20:07:12 +02:00
Daniel García
d8ea3d2bfe Merge pull request #582 from vverst/require-device-email-config
Add config option to require new device emails
2019-08-19 22:58:50 +02:00
vpl
ee7837d022 Add option to require new device emails 2019-08-19 22:14:00 +02:00
Daniel García
07743e490b Ignore error sending device email 2019-08-18 19:32:26 +02:00
Daniel García
9101d6e48f Update dependencies 2019-08-18 19:31:54 +02:00
Daniel García
27c23b60b8 Merge pull request #571 from BlackDex/icon-proxy-support
Added reqwest proxy support
2019-08-15 22:14:10 +02:00
BlackDex
e7b6238f43 Added reqwest proxy support 2019-08-12 17:24:32 +02:00
vpl
ad2225b6e5 Add configuration options for Email 2FA 2019-08-10 22:39:04 +02:00
vpl
5609103a97 Use ring to generate email token 2019-08-06 22:38:08 +02:00
vpl
6d460b44b0 Use saved token for email 2fa codes 2019-08-04 17:21:57 +02:00
vpl
efd8d9f528 Remove some unused imports, unneeded mut variables 2019-08-04 16:56:41 +02:00
vpl
29aedd388e Add email code logic and move two_factor into separate modules 2019-08-04 16:56:41 +02:00
vpl
27e0e41835 Add email authenticator logic 2019-08-04 16:56:39 +02:00
vpl
0b60f20eb3 Add email message for twofactor email codes 2019-08-03 18:49:34 +02:00
Daniel García
8be2ed6255 Update web vault to 2.11.0 2019-07-30 19:50:35 +02:00
Daniel García
c9c3f07171 Updated dependencies and fixed panic getting icons 2019-07-30 19:42:05 +02:00
Daniel García
8a21c6df10 Merge pull request #541 from vverst/mail-new-device
Add "New Device Logged In From" email
2019-07-30 13:18:42 -04:00
vpl
df71f57d86 Move send device email to end of password login
Send new device email after two factor authentication.
2019-07-25 21:10:27 +02:00
vpl
60e39a9dd1 Move retrieve/new device from connData to separate function 2019-07-22 12:30:26 +02:00
vpl
bc6a53b847 Add new device email when user logs in 2019-07-22 08:26:24 +02:00
Daniel García
05a1137828 Move backend checks to build.rs to fail fast, and updated dependencies 2019-07-09 17:26:34 +02:00
Daniel García
cef38bf40b Merge pull request #525 from fbartels/hadolint
use hadolint for linting Dockerfiles
2019-07-09 17:22:27 +02:00
Felix Bartels
0b13a8c4aa last round of linting fixes
Signed-off-by: Felix Bartels <felix@host-consultants.de>
2019-07-06 08:36:18 +02:00
Felix Bartels
3fbd7919d8 more linting fixes
Signed-off-by: Felix Bartels <felix@host-consultants.de>
2019-07-06 08:16:05 +02:00
Felix Bartels
5f688ff209 no more linting errors for the main Dockerfile
Signed-off-by: Felix Bartels <felix@host-consultants.de>
2019-07-05 22:45:29 +02:00
Felix Bartels
f6cfb5bf21 add hadolint config file
to globally ignore certain rules
2019-07-05 11:06:44 +02:00
Felix Bartels
df8c9f39ac add hadolint to travisfile
Signed-off-by: Felix Bartels <felix@host-consultants.de>
2019-07-04 15:59:50 +02:00
Daniel García
d7ee7caed4 Merge pull request #520 from njfox/fix-email-alias
Fix #468 - Percent-encode the email address in invite link
2019-07-03 22:42:42 +02:00
Nick Fox
2e300da057 Fix #468 - Percent-encode the email address in invite link 2019-07-02 22:55:13 -04:00
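A minimal sketch with the `percent-encoding` crate added in this tree; the invite route shape below is illustrative, not the project's exact URL:

```rust
use percent_encoding::{utf8_percent_encode, NON_ALPHANUMERIC};

fn invite_link(base: &str, email: &str, token: &str) -> String {
    // Without encoding, a '+' in the address would be decoded as a space
    // when the link is opened, breaking aliased addresses.
    let email = utf8_percent_encode(email, NON_ALPHANUMERIC);
    format!("{}/#/accept-invite?email={}&token={}", base, email, token)
}

fn main() {
    // user+alias@example.com is emitted percent-encoded in the URL
    let link = invite_link("https://vault.example.com", "user+alias@example.com", "tok123");
    println!("{}", link);
}
```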
Daniel García
3fb63bbe8c Merge pull request #514 from mprasil/dockerfile_cleanup
Dockerfile cleanup
2019-06-26 17:20:10 +02:00
Miro Prasil
9671ed4cca Symlink amd64 Dockerfile to repo root 2019-06-24 09:59:43 +01:00
Miro Prasil
d10ef3fd4b Create Dockerfiles for mysql builds 2019-06-24 09:56:26 +01:00
Miro Prasil
dd0b847912 Move current dockerfiles to their arch folders 2019-06-24 09:52:55 +01:00
Daniel García
8c34ff5d23 Merge pull request #511 from CubityFirst/patch-1
Corrected Spelling
2019-06-18 18:28:00 +02:00
Daniel García
15750256e2 Merge pull request #510 from mprasil/armv6_fix
Making a symlink is no longer necessary
2019-06-18 18:27:47 +02:00
Cubity_First
6989fc7bdb Corrected Spelling
Changed it from Chache to Cache on Line 207
2019-06-18 15:45:19 +01:00
Miro Prasil
4923614730 Making a symlink is no longer necessary 2019-06-17 12:16:26 +01:00
Daniel García
76f38621de Update dependencies and remove unwraps from Cipher::to_json 2019-06-14 22:51:50 +02:00
Daniel García
fff72889f6 Document DB URL in .env file 2019-06-02 13:44:59 +02:00
Daniel García
12af32b9ea Don't print DB URL 2019-06-02 13:39:16 +02:00
Daniel García
9add8e19eb Update dependencies and remove travis unused feature 2019-06-02 00:28:20 +02:00
Daniel García
5710703c50 Make sure the backup option only appears when using sqlite 2019-06-02 00:08:52 +02:00
Daniel García
1322b876e9 Merge pull request #493 from endyman/feature/initial_mysql_support
Initial support for mysql
2019-06-01 23:33:06 +02:00
Daniel García
9ed2ba61c6 Merge pull request #475 from TheMardy/master
Create backup functionality
2019-06-01 23:29:58 +02:00
Nils Domrose
62a461ae15 remove syslog from ci, make feature flags clearer 2019-05-30 22:19:58 +02:00
Nils Domrose
6f7220b68e adapt other Dockerfiles 2019-05-28 11:56:49 +02:00
Nils Domrose
4859932d35 fixed typo 2019-05-28 07:48:17 +02:00
Nils Domrose
ee277de707 include libsqlite3-sys optionally, removed non common features 2019-05-27 23:31:56 +02:00
Nils Domrose
c11f47903a revert include libsqlite3-sys optionally 2019-05-27 23:18:45 +02:00
Nils Domrose
6a5f1613e7 include libsqlite3-sys optionally 2019-05-27 23:07:47 +02:00
Nils Domrose
dc36f0cb6c re-added sqlite check_db code, cleanup 2019-05-27 22:58:52 +02:00
Nils Domrose
6c38026ef5 use char(36) for uuid columns 2019-05-27 17:20:20 +02:00
Nils Domrose
4c9cc9890c adapt travis to not enable conflicting features 2019-05-27 00:41:42 +02:00
Nils Domrose
f57b407c60 fix cargo syntax 2019-05-27 00:29:31 +02:00
Nils Domrose
ce0651b79c fix mysql package in ubuntu 2019-05-27 00:23:42 +02:00
Nils Domrose
edc26cb1e1 adapt pipeline to not enable conflicting features 2019-05-27 00:19:59 +02:00
Nils Domrose
ff759397f6 initial mysql support 2019-05-26 23:03:05 +02:00
Emil Madsen
badd22ac3d Make docker image build 2019-05-20 22:36:27 +02:00
Emil Madsen
6f78395ef7 Passwordless sudo on azure? 2019-05-20 21:59:18 +02:00
Emil Madsen
5fb6531db8 Attempt to fix azure pipeline 2019-05-20 21:54:01 +02:00
Emil Madsen
eb9d5e1196 Reintroduce .env.template 2019-05-20 21:34:20 +02:00
Emil Madsen
233b48bdad Fix missing joinable in schema 2019-05-20 21:30:31 +02:00
Emil Madsen
e22e290f67 Fix key and type variable names for mysql 2019-05-20 21:24:29 +02:00
Emil Madsen
ab95a69dc8 Rework migrations for MySQL 2019-05-20 21:12:41 +02:00
Emil Madsen
85c8a01f4a Merge branch 'master' of github.com:Skeen/bitwarden_rs 2019-05-20 19:53:18 +02:00
Emil Madsen
42af7c6dab MySQL database 2019-05-20 19:53:14 +02:00
Daniel García
08a445e2ac Merge pull request #484 from mprasil/hub_repo_change
Point to the new docker hub image location
2019-05-17 15:44:07 +02:00
Daniel García
c0b2877da3 Update deps and swap back to official u2f crate again 2019-05-17 15:39:36 +02:00
Miro Prasil
cf8ca85289 Point to the new docker hub image location 2019-05-16 15:04:51 +01:00
Daniel García
a8a92f6c51 New vault patch release 2019-05-15 18:11:39 +02:00
Daniel García
95f833aacd Update dependencies to use new ring 2019-05-15 18:10:25 +02:00
Daniel García
4f45cc081f Update ring to 0.14, jwt to 6.0, and u2f 2019-05-11 23:18:18 +02:00
Daniel García
2a4cd24c60 Updated web vault to hide org plans again and updated dependencies 2019-05-11 22:27:51 +02:00
TheMardy
ef551f4cc6 Create backup functionality
Added create backup functionality to the admin panel
2019-05-03 15:46:29 +02:00
Daniel García
4545f271c3 Merge pull request #473 from Starbix/patch-1
Update Runtime Base Image to Alpine v3.9
2019-05-02 22:33:43 +02:00
Cédric Laubacher
2768396a72 Update Runtime Base Image to Alpine v3.9 2019-05-02 21:28:34 +02:00
Daniel García
5521a86693 Change path for served images to avoid collision with vault images 2019-05-01 16:19:22 +02:00
Daniel García
3160780549 Merge pull request #401 from TheMardy/master
Images in Email Templates
2019-04-30 17:52:10 +02:00
TheMardy
f0701657a9 Changed to Bitwarden_RS Logo 2019-04-30 16:08:53 +02:00
Daniel García
21325b7523 Updated .env template 2019-04-27 20:14:37 +02:00
Daniel García
874f5c34bd Formatting 2019-04-26 22:08:26 +02:00
Daniel García
eadab2e9ca Updated dependencies 2019-04-26 22:07:00 +02:00
Daniel García
253faaf023 Use users duo host when required, instead of always using the global one 2019-04-15 13:07:23 +02:00
Daniel García
3d843a6a51 Merge pull request #460 from janost/organization-vault-purge
Fixed purging organization vault
2019-04-14 22:30:51 +02:00
janost
03fdf36bf9 Fixed purging organization vault 2019-04-14 22:12:48 +02:00
Daniel García
fdcc32beda Validate Duo credentials when custom 2019-04-14 22:05:05 +02:00
Daniel García
bf20355c5e Merge branch 'duo' 2019-04-14 22:02:55 +02:00
Daniel García
0136c793b4 Implement better user status API; in the future we'll probably want a way to disable users.
We should migrate from the empty password hash to a separate column then.
2019-04-13 00:01:52 +02:00
Daniel García
2e12114350 Always create the user when inviting from admin panel 2019-04-12 23:44:49 +02:00
Daniel García
f25ab42ebb Merge pull request #455 from ViViDboarder/get_users
Add new endpoint for retrieving all users
2019-04-11 20:42:44 +02:00
ViViDboarder
d3a8a278e6 Add new endpoint for retrieving all users 2019-04-11 11:24:53 -07:00
Daniel García
8d9827c55f Implement selection between global config and user settings for duo keys. 2019-04-11 18:40:03 +02:00
Daniel García
cad63f9761 Auto generate akey 2019-04-11 16:08:26 +02:00
Daniel García
bf446f44f9 Enable DATA_FOLDER to affect default CONFIG_FILE path 2019-04-11 15:41:13 +02:00
Daniel García
621f607297 Update dependencies and fix some warnings 2019-04-11 15:40:19 +02:00
Daniel García
d89bd707a8 Update vault release to show duo button 2019-04-07 18:58:32 +02:00
Daniel García
754087b990 Add global duo config and document options in .env template 2019-04-07 18:58:15 +02:00
Daniel García
cfbeb56371 Implement user duo, initial version
TODO:
- At the moment each user needs to configure a Duo application and input the API keys; we need to check if multiple users can register with the same keys correctly, and if so we could implement a global setting.
- Sometimes the Duo frame doesn't load correctly, but canceling, reloading the page and logging in again seems to fix it for me.
2019-04-05 22:09:53 +02:00
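The corresponding global options, as they appear in the .env template later in this changeset (placeholder values):

```sh
DUO_IKEY=<Integration Key>
DUO_SKEY=<Secret Key>
DUO_HOST=<API Hostname>
```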
Daniel García
3bb46ce496 Make the syslog crate non-optional when available 2019-04-02 22:35:22 +02:00
Daniel García
c5832f2b30 With the latest fern, syslog can be a config option instead of a build flag 2019-03-29 20:27:20 +01:00
Daniel García
d9406b0095 Update to web vault 2.10.0 2019-03-25 23:49:12 +01:00
Daniel García
2475c36a75 Implement log_level config option 2019-03-25 14:23:14 +01:00
Daniel García
c384f9c0ca Set default log level to Info, we don't use debug anyway and it just fills the logs with other crates info. 2019-03-25 14:21:50 +01:00
Daniel García
afbfebf659 Merge pull request #440 from BlackDex/mail-encoding
Fixed long e-mail messages exceeding the 1000-character SMTP line limit.
2019-03-25 13:09:51 +01:00
BlackDex
6b686c18f7 Fixed long e-mail messages exceeding the 1000-character SMTP line limit.
- Added the quoted_printable crate to encode the e-mail messages.
- Changed the way the e-mail gets built to use custom part headers.
2019-03-25 09:48:19 +01:00
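A small sketch of why quoted-printable helps, assuming the crate's `encode_to_str` helper: SMTP caps raw message lines at 1000 characters, and the encoding inserts soft line breaks to stay well below that:

```rust
// Cargo.toml (sketch): quoted_printable = "0.4"
fn main() {
    // One huge single-line body: raw SMTP would reject or mangle this.
    let long_line = "a".repeat(5000);
    let encoded = quoted_printable::encode_to_str(&long_line);
    // Soft breaks ("=" at end of line) keep each encoded line short.
    let longest = encoded.lines().map(|l| l.trim_end().len()).max().unwrap_or(0);
    println!("longest encoded line: {} chars", longest);
}
```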
Daniel García
349cb33fbd Updated dependencies 2019-03-23 19:48:22 +01:00
Daniel García
d7542b6818 Merge pull request #437 from njfox/fix-smtp-error
Split up long line to stop SMTP from breaking
2019-03-21 14:22:57 +01:00
Nick Fox
7976d39d9d Adjust whitespace 2019-03-20 23:29:29 -04:00
Nick Fox
5ee9676941 Break up long line to stop SMTP from breaking 2019-03-20 23:24:30 -04:00
Daniel García
4b40cda910 Added domain blacklist regex for icons service and improved valid domain check.
Reorganized the icons code a bit.
2019-03-18 22:12:39 +01:00
Daniel García
4689ed7b30 Changed uppercase deserializer to avoid a clone. 2019-03-18 22:02:37 +01:00
Daniel García
084bc2aee3 Use final release of lettre and update dependencies 2019-03-17 14:43:22 +01:00
Daniel García
6d7e15b2fd Use web vault 2.9.0 release 2019-03-14 13:29:03 +01:00
Daniel García
61515160a7 Allow changing error codes and create an empty error.
Return 404 instead of 400 when no accounts breached.
2019-03-14 00:17:36 +01:00
Daniel García
a25bfdd16d Remove unused features from multipart (integration with other servers) 2019-03-13 15:57:00 +01:00
Daniel García
e93538cea9 Add option to use wrapped TLS in email, instead of STARTTLS upgrade 2019-03-10 14:45:42 +01:00
Daniel García
b4244b28b6 Update admin page scripts and fixed broken tooltip 2019-03-09 14:41:34 +01:00
Daniel García
43f9038325 Add option to force resync clients in admin panel 2019-03-07 21:08:33 +01:00
Daniel García
27872f476e Update dependencies 2019-03-07 20:22:08 +01:00
Daniel García
339044f8aa Add warning about config panel values overriding env vars. 2019-03-07 20:22:02 +01:00
Daniel García
0718a090e1 Trim spaces from admin token during authentication and validate that the admin panel token is not empty 2019-03-07 20:21:50 +01:00
Daniel García
9e1f030a80 Explicitly close SMTP connection in case of error. 2019-03-07 20:21:10 +01:00
Daniel García
04922f6aa0 Some formatting and dependency updates 2019-03-03 16:11:55 +01:00
Daniel García
7d2bc9e162 Added option to force 2fa at logins and made some changes to two factor code.
Added newlines to config options to keep them a reasonable length.
2019-03-03 16:09:15 +01:00
Daniel García
c6c00729e3 Update vault to new version. No need to wait for a release when even the official web vault is already using it 2019-02-27 17:28:04 +01:00
Daniel García
10756b0920 Update dependencies and fix some lints 2019-02-27 17:21:04 +01:00
Daniel García
1eb1502a07 Merge pull request #416 from mprasil/armv6
Armv6
2019-02-25 18:26:53 +01:00
Miroslav Prasil
30e72a96a9 Symlink missing ld-linux file 2019-02-25 16:17:34 +00:00
Daniel García
2646db78a4 Merge pull request #414 from FrankPetrilli/patch-1
Minor typo fix conect => connect
2019-02-25 14:21:28 +01:00
Miroslav Prasil
f5358b13f5 Add Dockerfile for armv6 2019-02-25 12:17:22 +00:00
Frank Petrilli
d156170971 Minor typo fix conect => connect 2019-02-24 16:08:38 -08:00
Daniel García
d9bfe847db Merge pull request #410 from gdamjan/remove-uneeded-mutability
remove some unneeded mutability
2019-02-22 22:52:53 +01:00
Дамјан Георгиевски
473f8b8e31 remove some unneeded mutability 2019-02-22 20:25:50 +01:00
Daniel García
aeb4b4c8a5 Remove verbose, otherwise the logs get filled with useless info 2019-02-22 16:16:07 +01:00
Daniel García
980a3e45db Set up CI with Azure Pipelines 2019-02-22 15:51:30 +01:00
Daniel García
5794969f5b Merge pull request #406 from shauder/feature/disable-admin-token
Allow the Admin token to be disabled in the advanced menu
2019-02-20 23:06:52 +01:00
Shane Faulkner
8b5b06c3d1 Allow the Admin token to be disabled in the advanced menu 2019-02-20 14:56:08 -06:00
Daniel García
b50c27b619 Print a warning when an env variable is being overridden by the config file, and reorganize the main file a bit.
Modified the JWT key generation; now it should also show the output of OpenSSL in the logs.
2019-02-20 20:59:37 +01:00
Daniel García
5ee04e31e5 Updated dependencies, removed some unnecessary clones and fixed some lints 2019-02-20 17:54:18 +01:00
Daniel García
bf6ae91a6d Remove margins on small devices 2019-02-18 20:43:34 +01:00
Daniel García
828e3a5795 Add extra padding when the toolbar collapses in small devices 2019-02-18 20:33:32 +01:00
Daniel García
7b5bcd45f8 Show read-only options in the config panel and the env variable names in the tooltips 2019-02-18 19:25:33 +01:00
Daniel García
72de16fb86 Merge pull request #404 from mprasil/disable_wal
Add an option to not enable WAL (should help in #399)
2019-02-18 16:10:16 +01:00
Miroslav Prasil
0b903fc5f4 Extended the template file and refer to wiki 2019-02-18 14:57:21 +00:00
Miroslav Prasil
4df686f49e Add an option to not enable WAL (should help in #399) 2019-02-18 10:48:48 +00:00
Daniel García
d7eeaaf249 Escape user data from admin panel when calling JS 2019-02-17 15:24:14 +01:00
TheMardy
84fb6aaddb Set correct MIME type 2019-02-17 01:08:24 +01:00
Daniel García
a744b9437a Implemented multiple U2f keys, key names, and compromised checks 2019-02-16 23:07:48 +01:00
Daniel García
6027b969f5 Delete old devices when deauthorizing user sessions 2019-02-16 23:06:26 +01:00
Daniel García
93805a5d7b Fix Yubikeys deleted on error 2019-02-16 21:30:55 +01:00
Daniel García
71da961ecd Merge pull request #402 from mprasil/version_in_docker
Include git repo in build so we get version
2019-02-16 12:20:25 +01:00
Miroslav Prasil
dd421809e5 Include git repo in build so we get version 2019-02-16 08:50:16 +00:00
TheMardy
8526055bb7 Added images to email templates 2019-02-16 03:48:23 +01:00
TheMardy
a79334ea4c Added static email image routes 2019-02-16 03:44:30 +01:00
Daniel García
274ea9a4f2 Use the latest fast_chemail crate directly, with the fix 2019-02-15 14:39:30 +01:00
Daniel García
8743d18aca Update travis image and remove now-ignored sudo tag 2019-02-13 18:50:45 +01:00
Daniel García
d3773a433a Removed list of mounted routes at startup by default, with option to add it back. This would get annoying when starting the server frequently, because it printed ~130 lines of mostly useless info 2019-02-13 00:03:16 +01:00
Daniel García
0f0a87becf Add version to initial message 2019-02-12 22:47:00 +01:00
Daniel García
4b57bb8eeb Merge pull request #394 from BlackDex/icon-timeout
Added config option for icon download timeout
2019-02-12 22:00:12 +01:00
BlackDex
3b27dbb0aa Added config option for icon download timeout 2019-02-12 21:56:28 +01:00
Daniel García
ff2fbd322e Update deps and fix email check 2019-02-12 15:01:02 +01:00
Daniel García
9636f33fdb Implement constant time equal check for admin, 2fa recover and 2fa remember tokens 2019-02-11 23:45:55 +01:00
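A minimal sketch of such a check with the `ring` version pinned in this tree (0.14); a plain `==` can return early on the first mismatching byte, which leaks timing information:

```rust
use ring::constant_time::verify_slices_are_equal;

// Compare secrets in constant time; also fails (safely) on length mismatch.
fn ct_eq(a: &str, b: &str) -> bool {
    verify_slices_are_equal(a.as_bytes(), b.as_bytes()).is_ok()
}

fn main() {
    assert!(ct_eq("admin-token", "admin-token"));
    assert!(!ct_eq("admin-token", "guessed-token"));
    println!("constant-time comparison ok");
}
```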
Daniel García
bbe2a1b264 Merge pull request #391 from TheMardy/master
Updated Email Templates
2019-02-10 22:03:20 +01:00
Daniel García
79fdfd6524 Add missing url parameter 2019-02-10 21:40:20 +01:00
Daniel García
d086a99e5b Implemented HTML emails with text alternative 2019-02-10 19:12:34 +01:00
TheMardy
22b0b95209 Added HTML templates (+14 squashed commit)
Squashed commit:

[ece2260] Plaintext send_org_invite

[01d4884] Plaintext pw_hint_some

[6ce5173] Plaintext pw_hint_none

[881af3e] Plaintext invite_confirmed

[ce78621] Plaintext invite_accepted

[13a44a4] Rename send_org_invite.hbs to send_org_invite.html.hbs

[b52bf2f] Rename pw_hint_some.hbs to pw_hint_some.html.hbs

[e0d1aeb] Rename pw_hint_none.hbs to pw_hint_none.html.hbs

[898dbcd] Rename invite_confirmed.hbs to invite_confirmed.html.hbs

[107af31] Rename invite_accepted.hbs to invite_accepted.html.hbs

[d26d662] Updated send_org_invite template

[71f47af] Updated pw_hint_some template

[c2ca3c2] Updated pw_hint_none template

[50f8bfb] Updated invite_accepted template

[17f96f8] Updated invite_confirmed template
2019-02-10 19:04:18 +01:00
Daniel García
28d1588e73 Show version in admin panel 2019-02-10 16:02:46 +01:00
Daniel García
f3b1a5ff3e Error when admin panel is disabled 2019-02-10 15:26:19 +01:00
Daniel García
330e90a6ac Hide secrets in config panel 2019-02-08 20:49:04 +01:00
141 changed files with 20003 additions and 2655 deletions


@@ -9,10 +9,6 @@ data
.idea
*.iml
# Git files
.git
.gitignore
# Documentation
*.md


@@ -4,8 +4,13 @@
## Main data folder
# DATA_FOLDER=data
## Individual folders, these override %DATA_FOLDER%
## Database URL
## When using SQLite, this is the path to the DB file, defaults to %DATA_FOLDER%/db.sqlite3
## When using MySQL, this is the URL to the DB, including username and password:
## Format: mysql://[user[:password]@]host/database_name
# DATABASE_URL=data/db.sqlite3
## Individual folders, these override %DATA_FOLDER%
# RSA_KEY_FILENAME=data/rsa_key
# ICON_CACHE_FOLDER=data/icon_cache
# ATTACHMENTS_FOLDER=data/attachments
@@ -35,7 +40,7 @@
## Enable extended logging
## This shows timestamps and allows logging to file and to syslog
### To enable logging to file, use the LOG_FILE env variable
### To enable syslog, you need to compile with `cargo build --features=enable_syslog'
### To enable syslog, use the USE_SYSLOG env variable
# EXTENDED_LOGGING=true
## Logging to file
@@ -43,20 +48,70 @@
## It's recommended to also set 'ROCKET_CLI_COLORS=off'
# LOG_FILE=/path/to/log
## Logging to Syslog
## This requires extended logging
## It's recommended to also set 'ROCKET_CLI_COLORS=off'
# USE_SYSLOG=false
## Log level
## Change the verbosity of the log output
## Valid values are "trace", "debug", "info", "warn", "error" and "off"
## This requires extended logging
# LOG_LEVEL=Info
## Enable WAL for the DB
## Set to false to avoid enabling WAL during startup.
## Note that if the DB already has WAL enabled, you will also need to disable WAL in the DB,
## this setting only prevents bitwarden_rs from automatically enabling it on start.
## Please read the project wiki page about this setting before changing the value, as it can
## cause performance degradation or might render the service unable to start.
# ENABLE_DB_WAL=true
## Disable icon downloading
## Set to true to disable icon downloading. This still serves icons from $ICON_CACHE_FOLDER,
## but won't produce any external network requests. You also need to set $ICON_CACHE_TTL to 0,
## otherwise the cached icons will be deleted and won't be downloaded again.
# DISABLE_ICON_DOWNLOAD=false
## Icon download timeout
## Configure the timeout value when downloading the favicons.
## The default is 10 seconds, but this could be too low on slower network connections
# ICON_DOWNLOAD_TIMEOUT=10
## Icon blacklist Regex
## Any domains or IPs that match this regex won't be fetched by the icon service.
## Useful to hide other servers in the local network. Check the WIKI for more details
# ICON_BLACKLIST_REGEX=192\.168\.1\.[0-9].*^
## Any IP which is not defined as a global IP will be blacklisted.
## Useful to secure your internal environment: see https://en.wikipedia.org/wiki/Reserved_IP_addresses for a list of IPs which it will block
# ICON_BLACKLIST_NON_GLOBAL_IPS=true
## Disable 2FA remember
## Enabling this would force the users to use a second factor to log in every time.
## Note that the checkbox would still be present, but ignored.
# DISABLE_2FA_REMEMBER=false
## Controls if new users can register
# SIGNUPS_ALLOWED=true
## Controls if new users from a list of comma-separated domains can register
## even if SIGNUPS_ALLOWED is set to false
##
## WARNING: There is currently no validation that prevents anyone from
## signing up with any made-up email address from one of these
## whitelisted domains!
# SIGNUPS_DOMAINS_WHITELIST=example.com,example.net,example.org
## Token for the admin interface, preferably use a long random string
## One option is to use 'openssl rand -base64 48'
## If not set, the admin panel is disabled
# ADMIN_TOKEN=Vy2VyYTTsKPv8W5aEOWUbB/Bt3DEKePbHmI4m9VcemUMS2rEviDowNAFqYi1xjmp
## Enable this to bypass the admin panel security. This option is only
## meant to be used with the use of a separate auth layer in front
# DISABLE_ADMIN_TOKEN=false
## Controls whether org admins can invite users, even when signups are disabled
# INVITATIONS_ALLOWED=true
@@ -82,6 +137,29 @@
# YUBICO_SECRET_KEY=AAAAAAAAAAAAAAAAAAAAAAAA
# YUBICO_SERVER=http://yourdomain.com/wsapi/2.0/verify
## Duo Settings
## You need to configure all options to enable global Duo support, otherwise users would need to configure it themselves
## Create an account and protect an application as mentioned in this link (only the first step, not the rest):
## https://help.bitwarden.com/article/setup-two-step-login-duo/#create-a-duo-security-account
## Then set the following options, based on the values obtained from the last step:
# DUO_IKEY=<Integration Key>
# DUO_SKEY=<Secret Key>
# DUO_HOST=<API Hostname>
## After that, you should be able to follow the rest of the guide linked above,
## ignoring the fields that ask for the values that you already configured beforehand.
## Authenticator Settings
## Disable acceptance of time-drifted authenticator (TOTP) codes.
## TOTP codes of the previous and next 30 seconds will be invalid
##
## According to RFC 6238 (https://tools.ietf.org/html/rfc6238),
## we by default allow the TOTP codes that were valid one step back and one step into the future.
## This can however make attackers a bit more lucky with their attempts, because there are 3 valid codes.
## You can disable this, so that only the current TOTP code is allowed.
## Keep in mind that when a server drifts out of time, valid codes could be marked as invalid.
## In any case, once a code has been used it cannot be used again, and codes which predate it will also be invalid.
# AUTHENTICATOR_DISABLE_TIME_DRIFT = false
## Rocket specific settings, check Rocket documentation to learn more
# ROCKET_ENV=staging
# ROCKET_ADDRESS=0.0.0.0 # Enable this to test mobile app
@@ -97,4 +175,8 @@
# SMTP_PORT=587
# SMTP_SSL=true
# SMTP_USERNAME=username
# SMTP_PASSWORD=password
# SMTP_PASSWORD=password
# SMTP_AUTH_MECHANISM="Plain"
# SMTP_TIMEOUT=15
# vim: syntax=ini

.hadolint.yaml (new file, 7 lines)

@@ -0,0 +1,7 @@
ignored:
# disable explicit version for apt install
- DL3008
# disable explicit version for apk install
- DL3018
trustedRegistries:
- docker.io


@@ -1,9 +1,21 @@
# Copied from Rocket's .travis.yml
dist: xenial
env:
global:
- HADOLINT_VERSION=1.17.1
language: rust
sudo: required # so we get a VM with higher specs
dist: trusty # so we get a VM with higher specs
rust: nightly
cache: cargo
rust:
- nightly
before_install:
- sudo curl -L https://github.com/hadolint/hadolint/releases/download/v$HADOLINT_VERSION/hadolint-$(uname -s)-$(uname -m) -o /usr/local/bin/hadolint
- sudo chmod +rx /usr/local/bin/hadolint
- rustup set profile minimal
# Nothing to install
install: true
script:
- cargo build --verbose --all-features
- git ls-files --exclude='Dockerfile*' --ignored | xargs --max-lines=1 hadolint
- cargo build --features "sqlite"
- cargo build --features "mysql"

Cargo.lock (generated, 2427 lines changed)

File diff suppressed because it is too large.


@@ -11,53 +11,59 @@ publish = false
build = "build.rs"
[features]
enable_syslog = ["syslog", "fern/syslog-4"]
# Empty to keep compatibility, prefer to set USE_SYSLOG=true
enable_syslog = []
mysql = ["diesel/mysql", "diesel_migrations/mysql"]
postgresql = ["diesel/postgres", "diesel_migrations/postgres", "openssl"]
sqlite = ["diesel/sqlite", "diesel_migrations/sqlite", "libsqlite3-sys"]
[target."cfg(not(windows))".dependencies]
syslog = "4.0.1"
[dependencies]
# Web framework for nightly with a focus on ease-of-use, expressibility, and speed.
rocket = { version = "0.4.0", features = ["tls"], default-features = false }
rocket_contrib = "0.4.0"
rocket = { version = "0.5.0-dev", features = ["tls"], default-features = false }
rocket_contrib = "0.5.0-dev"
# HTTP client
reqwest = "0.9.9"
reqwest = "0.9.22"
# multipart/form-data support
multipart = "0.16.1"
multipart = { version = "0.16.1", features = ["server"], default-features = false }
# WebSockets library
ws = "0.7.9"
ws = "0.9.1"
# MessagePack library
rmpv = "0.4.0"
rmpv = "0.4.2"
# Concurrent hashmap implementation
chashmap = "2.2.0"
chashmap = "2.2.2"
# A generic serialization/deserialization framework
serde = "1.0.85"
serde_derive = "1.0.85"
serde_json = "1.0.37"
serde = "1.0.102"
serde_derive = "1.0.102"
serde_json = "1.0.41"
# Logging
log = "0.4.6"
fern = "0.5.7"
syslog = { version = "4.0.1", optional = true }
log = "0.4.8"
fern = { version = "0.5.9", features = ["syslog-4"] }
# A safe, extensible ORM and Query builder
diesel = { version = "1.4.1", features = ["sqlite", "chrono", "r2d2"] }
diesel_migrations = { version = "1.4.0", features = ["sqlite"] }
diesel = { version = "1.4.3", features = [ "chrono", "r2d2"] }
diesel_migrations = "1.4.0"
# Bundled SQLite
libsqlite3-sys = { version = "0.12.0", features = ["bundled"] }
# Bundled SQLite
libsqlite3-sys = { version = "0.16.0", features = ["bundled"], optional = true }
# Crypto library
ring = { version = "0.13.5", features = ["rsa_signing"] }
ring = "0.14.6"
# UUID generation
uuid = { version = "0.7.2", features = ["v4"] }
uuid = { version = "0.8.1", features = ["v4"] }
# Date and time library for Rust
chrono = "0.4.6"
chrono = "0.4.9"
# TOTP library
oath = "0.10.2"
@@ -66,44 +72,54 @@ oath = "0.10.2"
data-encoding = "2.1.2"
# JWT library
jsonwebtoken = "5.0.1"
jsonwebtoken = "6.0.1"
# U2F library
u2f = "0.1.4"
u2f = "0.1.6"
# Yubico Library
yubico = { version = "0.5.1", features = ["online"], default-features = false }
yubico = { version = "0.7.1", features = ["online-tokio"], default-features = false }
# A `dotenv` implementation for Rust
dotenv = { version = "0.13.0", default-features = false }
dotenv = { version = "0.15.0", default-features = false }
# Lazy static macro
lazy_static = { version = "1.2.0", features = ["nightly"] }
lazy_static = "1.4.0"
# More derives
derive_more = "0.13.0"
derive_more = "0.99.2"
# Numerical libraries
num-traits = "0.2.6"
num-derive = "0.2.4"
num-traits = "0.2.9"
num-derive = "0.3.0"
# Email libraries
lettre = "0.9.0"
lettre_email = "0.9.0"
native-tls = "0.2.2"
lettre = "0.9.2"
lettre_email = "0.9.2"
native-tls = "0.2.3"
quoted_printable = "0.4.1"
# Template library
handlebars = "1.1.0"
handlebars = "2.0.2"
# For favicon extraction from main website
soup = "0.3.0"
regex = "1.1.0"
soup = "0.4.1"
regex = "1.3.1"
# Required for SSL support for PostgreSQL
openssl = { version = "0.10.25", optional = true }
# URL encoding library
percent-encoding = "2.1.0"
[patch.crates-io]
# Add support for Timestamp type
rmp = { git = 'https://github.com/dani-garcia/msgpack-rust' }
rmp = { git = 'https://github.com/3Hren/msgpack-rust', rev = 'd6c6c672e470341207ed9feb69b56322b5597a11' }
# Use new native_tls version 0.2
lettre = { git = 'https://github.com/lettre/lettre', rev = 'c988b1760ad81' }
lettre_email = { git = 'https://github.com/lettre/lettre', rev = 'c988b1760ad81' }
# Use newest ring
rocket = { git = 'https://github.com/SergioBenitez/Rocket', rev = 'b95b6765e1cc8be7c1e7eaef8a9d9ad940b0ac13' }
rocket_contrib = { git = 'https://github.com/SergioBenitez/Rocket', rev = 'b95b6765e1cc8be7c1e7eaef8a9d9ad940b0ac13' }
# Use git version for timeout fix #706
lettre = { git = 'https://github.com/lettre/lettre', rev = '24d694db3be017d82b1cdc8bf9da601420b31bb0' }
lettre_email = { git = 'https://github.com/lettre/lettre', rev = '24d694db3be017d82b1cdc8bf9da601420b31bb0' }


@@ -1,86 +0,0 @@
# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
FROM alpine as vault
ENV VAULT_VERSION "v2.8.0d"
ENV URL "https://github.com/dani-garcia/bw_web_builds/releases/download/$VAULT_VERSION/bw_web_$VAULT_VERSION.tar.gz"
RUN apk add --update-cache --upgrade \
curl \
tar
RUN mkdir /web-vault
WORKDIR /web-vault
RUN curl -L $URL | tar xz
RUN ls
########################## BUILD IMAGE ##########################
# We need to use the Rust build image, because
# we need the Rust compiler and Cargo tooling
FROM rust as build
# Using bundled SQLite, no need to install it
# RUN apt-get update && apt-get install -y\
# sqlite3\
# --no-install-recommends\
# && rm -rf /var/lib/apt/lists/*
# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin app
WORKDIR /app
# Copies over *only* your manifests and build files
COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs
# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN cargo build --release
RUN find . -not -path "./target*" -delete
# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .
# Make sure that we actually build the project
RUN touch src/main.rs
# Builds again, this time it'll just be
# your actual source files being built
RUN cargo build --release
######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM debian:stretch-slim
ENV ROCKET_ENV "staging"
ENV ROCKET_PORT=80
ENV ROCKET_WORKERS=10
# Install needed libraries
RUN apt-get update && apt-get install -y\
openssl\
ca-certificates\
--no-install-recommends\
&& rm -rf /var/lib/apt/lists/*
RUN mkdir /data
VOLUME /data
EXPOSE 80
EXPOSE 3012
# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
COPY Rocket.toml .
COPY --from=vault /web-vault ./web-vault
COPY --from=build app/target/release/bitwarden_rs .
# Configures the startup!
CMD ./bitwarden_rs

Dockerfile (symbolic link, 1 line)

@@ -0,0 +1 @@
docker/amd64/sqlite/Dockerfile


@@ -3,7 +3,7 @@
---
[![Travis Build Status](https://travis-ci.org/dani-garcia/bitwarden_rs.svg?branch=master)](https://travis-ci.org/dani-garcia/bitwarden_rs)
[![Docker Pulls](https://img.shields.io/docker/pulls/mprasil/bitwarden.svg)](https://hub.docker.com/r/mprasil/bitwarden)
[![Docker Pulls](https://img.shields.io/docker/pulls/bitwardenrs/server.svg)](https://hub.docker.com/r/bitwardenrs/server)
[![Dependency Status](https://deps.rs/repo/github/dani-garcia/bitwarden_rs/status.svg)](https://deps.rs/repo/github/dani-garcia/bitwarden_rs)
[![GitHub Release](https://img.shields.io/github/release/dani-garcia/bitwarden_rs.svg)](https://github.com/dani-garcia/bitwarden_rs/releases/latest)
[![GPL-3.0 Licensed](https://img.shields.io/github/license/dani-garcia/bitwarden_rs.svg)](https://github.com/dani-garcia/bitwarden_rs/blob/master/LICENSE.txt)
@@ -34,8 +34,8 @@ Basically full implementation of Bitwarden API is provided including:
Pull the docker image and mount a volume from the host for persistent storage:
```sh
docker pull mprasil/bitwarden:latest
docker run -d --name bitwarden -v /bw-data/:/data/ -p 80:80 mprasil/bitwarden:latest
docker pull bitwardenrs/server:latest
docker run -d --name bitwarden -v /bw-data/:/data/ -p 80:80 bitwardenrs/server:latest
```
This will preserve any persistent data under /bw-data/, you can adapt the path to whatever suits you.
@@ -50,6 +50,6 @@ See the [bitwarden_rs wiki](https://github.com/dani-garcia/bitwarden_rs/wiki) fo
## Get in touch
To ask an question, [raising an issue](https://github.com/dani-garcia/bitwarden_rs/issues/new) is fine, also please report any bugs spotted here.
To ask a question, [raising an issue](https://github.com/dani-garcia/bitwarden_rs/issues/new) is fine. Please also report any bugs spotted here.
If you prefer to chat, we're usually hanging around at [#bitwarden_rs:matrix.org](https://matrix.to/#/#bitwarden_rs:matrix.org) room on Matrix. Feel free to join us!

azure-pipelines.yml (new file, 25 lines)

@@ -0,0 +1,25 @@
pool:
vmImage: 'Ubuntu-16.04'
steps:
- script: |
ls -la
curl https://sh.rustup.rs -sSf | sh -s -- -y --default-toolchain $(cat rust-toolchain) --profile=minimal
echo "##vso[task.prependpath]$HOME/.cargo/bin"
displayName: 'Install Rust'
- script: |
sudo apt-get update
sudo apt-get install -y libmysql++-dev
displayName: Install libmysql
- script: |
rustc -Vv
cargo -V
displayName: Query rust and cargo versions
- script : cargo build --features "sqlite"
displayName: 'Build project with sqlite backend'
- script : cargo build --features "mysql"
displayName: 'Build project with mysql backend'


@@ -1,11 +1,25 @@
use std::process::Command;
fn main() {
#[cfg(all(feature = "sqlite", feature = "mysql"))]
compile_error!("Can't enable both sqlite and mysql at the same time");
#[cfg(all(feature = "sqlite", feature = "postgresql"))]
compile_error!("Can't enable both sqlite and postgresql at the same time");
#[cfg(all(feature = "mysql", feature = "postgresql"))]
compile_error!("Can't enable both mysql and postgresql at the same time");
#[cfg(not(any(feature = "sqlite", feature = "mysql", feature = "postgresql")))]
compile_error!("You need to enable one DB backend. To build with previous defaults do: cargo build --features sqlite");
read_git_info().ok();
}
fn run(args: &[&str]) -> Result<String, std::io::Error> {
let out = Command::new(args[0]).args(&args[1..]).output()?;
if !out.status.success() {
use std::io::{Error, ErrorKind};
return Err(Error::new(ErrorKind::Other, "Command not successful"));
}
Ok(String::from_utf8(out.stdout).unwrap().trim().to_string())
}
@@ -13,8 +27,10 @@ fn run(args: &[&str]) -> Result<String, std::io::Error> {
fn read_git_info() -> Result<(), std::io::Error> {
// The exact tag for the current commit, can be empty when
// the current commit doesn't have an associated tag
let exact_tag = run(&["git", "describe", "--abbrev=0", "--tags", "--exact-match"])?;
println!("cargo:rustc-env=GIT_EXACT_TAG={}", exact_tag);
let exact_tag = run(&["git", "describe", "--abbrev=0", "--tags", "--exact-match"]).ok();
if let Some(ref exact) = exact_tag {
println!("cargo:rustc-env=GIT_EXACT_TAG={}", exact);
}
// The last available tag, equal to exact_tag when
// the current commit is tagged
@@ -27,13 +43,25 @@ fn read_git_info() -> Result<(), std::io::Error> {
// The current git commit hash
let rev = run(&["git", "rev-parse", "HEAD"])?;
let rev_short = rev.get(..12).unwrap_or_default();
let rev_short = rev.get(..8).unwrap_or_default();
println!("cargo:rustc-env=GIT_REV={}", rev_short);
// Combined version
let version = if let Some(exact) = exact_tag {
exact
} else if &branch != "master" {
format!("{}-{} ({})", last_tag, rev_short, branch)
} else {
format!("{}-{}", last_tag, rev_short)
};
println!("cargo:rustc-env=GIT_VERSION={}", version);
// To access these values, use:
// env!("GIT_EXACT_TAG")
// env!("GIT_LAST_TAG")
// env!("GIT_BRANCH")
// env!("GIT_REV")
// env!("GIT_VERSION")
Ok(())
}


@@ -0,0 +1,107 @@
# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
FROM alpine:3.10 as vault
ENV VAULT_VERSION "v2.12.0b"
ENV URL "https://github.com/dani-garcia/bw_web_builds/releases/download/$VAULT_VERSION/bw_web_$VAULT_VERSION.tar.gz"
RUN apk add --no-cache --upgrade \
curl \
tar
RUN mkdir /web-vault
WORKDIR /web-vault
SHELL ["/bin/ash", "-eo", "pipefail", "-c"]
RUN curl -L $URL | tar xz
RUN ls
########################## BUILD IMAGE ##########################
# We need to use the Rust build image, because
# we need the Rust compiler and Cargo tooling
FROM rust:1.38 as build
# set mysql backend
ARG DB=mysql
RUN apt-get update \
&& apt-get install -y \
--no-install-recommends \
gcc-aarch64-linux-gnu \
&& mkdir -p ~/.cargo \
&& echo '[target.aarch64-unknown-linux-gnu]' >> ~/.cargo/config \
&& echo 'linker = "aarch64-linux-gnu-gcc"' >> ~/.cargo/config
ENV CARGO_HOME "/root/.cargo"
ENV USER "root"
WORKDIR /app
# Prepare openssl arm64 libs
RUN sed 's/^deb/deb-src/' /etc/apt/sources.list > \
/etc/apt/sources.list.d/deb-src.list \
&& dpkg --add-architecture arm64 \
&& apt-get update \
&& apt-get install -y \
--no-install-recommends \
libssl-dev:arm64 \
libc6-dev:arm64 \
libmariadb-dev:arm64
ENV CC_aarch64_unknown_linux_gnu="/usr/bin/aarch64-linux-gnu-gcc"
ENV CROSS_COMPILE="1"
ENV OPENSSL_INCLUDE_DIR="/usr/include/aarch64-linux-gnu"
ENV OPENSSL_LIB_DIR="/usr/lib/aarch64-linux-gnu"
# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .
# Build
RUN rustup target add aarch64-unknown-linux-gnu
RUN cargo build --features ${DB} --release --target=aarch64-unknown-linux-gnu -v
######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM balenalib/aarch64-debian:buster
ENV ROCKET_ENV "staging"
ENV ROCKET_PORT=80
ENV ROCKET_WORKERS=10
RUN [ "cross-build-start" ]
# Install needed libraries
RUN apt-get update && apt-get install -y \
--no-install-recommends \
openssl \
ca-certificates \
curl \
libmariadbclient-dev \
&& rm -rf /var/lib/apt/lists/*
RUN mkdir /data
RUN [ "cross-build-end" ]
VOLUME /data
EXPOSE 80
# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
COPY Rocket.toml .
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/aarch64-unknown-linux-gnu/release/bitwarden_rs .
COPY docker/healthcheck.sh ./healthcheck.sh
HEALTHCHECK --interval=30s --timeout=3s CMD sh healthcheck.sh || exit 1
# Configures the startup!
WORKDIR /
CMD ["/bitwarden_rs"]


@@ -2,29 +2,35 @@
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
FROM alpine as vault
FROM alpine:3.10 as vault
ENV VAULT_VERSION "v2.8.0d"
ENV VAULT_VERSION "v2.12.0b"
ENV URL "https://github.com/dani-garcia/bw_web_builds/releases/download/$VAULT_VERSION/bw_web_$VAULT_VERSION.tar.gz"
RUN apk add --update-cache --upgrade \
RUN apk add --no-cache --upgrade \
curl \
tar
RUN mkdir /web-vault
WORKDIR /web-vault
SHELL ["/bin/ash", "-eo", "pipefail", "-c"]
RUN curl -L $URL | tar xz
RUN ls
########################## BUILD IMAGE ##########################
# We need to use the Rust build image, because
# we need the Rust compiler and Cargo tooling
FROM rust as build
FROM rust:1.38 as build
# set sqlite as default for DB ARG for backward compatibility
ARG DB=sqlite
RUN apt-get update \
&& apt-get install -y \
--no-install-recommends \
gcc-aarch64-linux-gnu \
&& mkdir -p ~/.cargo \
&& echo '[target.aarch64-unknown-linux-gnu]' >> ~/.cargo/config \
@@ -41,6 +47,7 @@ RUN sed 's/^deb/deb-src/' /etc/apt/sources.list > \
&& dpkg --add-architecture arm64 \
&& apt-get update \
&& apt-get install -y \
--no-install-recommends \
libssl-dev:arm64 \
libc6-dev:arm64
@@ -55,12 +62,12 @@ COPY . .
# Build
RUN rustup target add aarch64-unknown-linux-gnu
RUN cargo build --release --target=aarch64-unknown-linux-gnu -v
RUN cargo build --features ${DB} --release --target=aarch64-unknown-linux-gnu -v
######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM balenalib/aarch64-debian:stretch
FROM balenalib/aarch64-debian:buster
ENV ROCKET_ENV "staging"
ENV ROCKET_PORT=80
@@ -69,10 +76,12 @@ ENV ROCKET_WORKERS=10
RUN [ "cross-build-start" ]
# Install needed libraries
RUN apt-get update && apt-get install -y\
openssl\
ca-certificates\
--no-install-recommends\
RUN apt-get update && apt-get install -y \
--no-install-recommends \
openssl \
ca-certificates \
curl \
sqlite3 \
&& rm -rf /var/lib/apt/lists/*
RUN mkdir /data
@@ -88,5 +97,10 @@ COPY Rocket.toml .
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/aarch64-unknown-linux-gnu/release/bitwarden_rs .
COPY docker/healthcheck.sh ./healthcheck.sh
HEALTHCHECK --interval=30s --timeout=3s CMD sh healthcheck.sh || exit 1
# Configures the startup!
CMD ./bitwarden_rs
WORKDIR /
CMD ["/bitwarden_rs"]

View File

@@ -0,0 +1,104 @@
# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
FROM alpine:3.10 as vault
ENV VAULT_VERSION "v2.12.0b"
ENV URL "https://github.com/dani-garcia/bw_web_builds/releases/download/$VAULT_VERSION/bw_web_$VAULT_VERSION.tar.gz"
RUN apk add --no-cache --upgrade \
curl \
tar
RUN mkdir /web-vault
WORKDIR /web-vault
SHELL ["/bin/ash", "-eo", "pipefail", "-c"]
RUN curl -L $URL | tar xz
RUN ls
########################## BUILD IMAGE ##########################
# We need to use the Rust build image, because
# we need the Rust compiler and Cargo tooling
FROM rust:1.38 as build
# set mysql backend
ARG DB=mysql
# Using bundled SQLite, no need to install it
# RUN apt-get update && apt-get install -y\
# --no-install-recommends \
# sqlite3\
# && rm -rf /var/lib/apt/lists/*
# Install MySQL package
RUN apt-get update && apt-get install -y \
--no-install-recommends \
libmariadb-dev \
&& rm -rf /var/lib/apt/lists/*
# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin app
WORKDIR /app
# Copies over *only* your manifests and build files
COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs
# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN cargo build --features ${DB} --release
RUN find . -not -path "./target*" -delete
# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .
# Make sure that we actually build the project
RUN touch src/main.rs
# Builds again, this time it'll just be
# your actual source files being built
RUN cargo build --features ${DB} --release
######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM debian:buster-slim
ENV ROCKET_ENV "staging"
ENV ROCKET_PORT=80
ENV ROCKET_WORKERS=10
# Install needed libraries
RUN apt-get update && apt-get install -y \
--no-install-recommends \
openssl \
ca-certificates \
curl \
libmariadbclient-dev \
&& rm -rf /var/lib/apt/lists/*
RUN mkdir /data
VOLUME /data
EXPOSE 80
EXPOSE 3012
# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
COPY Rocket.toml .
COPY --from=vault /web-vault ./web-vault
COPY --from=build app/target/release/bitwarden_rs .
COPY docker/healthcheck.sh ./healthcheck.sh
HEALTHCHECK --interval=30s --timeout=3s CMD sh healthcheck.sh || exit 1
# Configures the startup!
WORKDIR /
CMD ["/bitwarden_rs"]
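The dummy-project dance above (cargo new, copy only the manifests, build, then delete everything except target/) exists purely so Docker can cache the compiled dependencies as their own layer; after that, editing src/ only rebuilds the crate itself. The database backend is chosen at build time via the DB build argument, for example (image tag illustrative):

docker build --build-arg DB=mysql -t bitwarden_rs:mysql .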

View File

@@ -0,0 +1,86 @@
# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
FROM alpine:3.10 as vault
ENV VAULT_VERSION "v2.12.0b"
ENV URL "https://github.com/dani-garcia/bw_web_builds/releases/download/$VAULT_VERSION/bw_web_$VAULT_VERSION.tar.gz"
RUN apk add --no-cache --upgrade \
curl \
tar
RUN mkdir /web-vault
WORKDIR /web-vault
SHELL ["/bin/ash", "-eo", "pipefail", "-c"]
RUN curl -L $URL | tar xz
RUN ls
########################## BUILD IMAGE ##########################
# Musl build image for statically compiled binary
FROM clux/muslrust:nightly-2019-10-19 as build
# set mysql backend
ARG DB=mysql
ENV USER "root"
# Install needed libraries
RUN apt-get update && apt-get install -y \
--no-install-recommends \
libmysqlclient-dev \
&& rm -rf /var/lib/apt/lists/*
WORKDIR /app
# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .
RUN rustup target add x86_64-unknown-linux-musl
# Make sure that we actually build the project
RUN touch src/main.rs
# Build
RUN cargo build --features ${DB} --release
######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM alpine:3.10
ENV ROCKET_ENV "staging"
ENV ROCKET_PORT=80
ENV ROCKET_WORKERS=10
ENV SSL_CERT_DIR=/etc/ssl/certs
# Install needed libraries
RUN apk add --no-cache \
openssl \
mariadb-connector-c \
curl \
ca-certificates
RUN mkdir /data
VOLUME /data
EXPOSE 80
EXPOSE 3012
# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
COPY Rocket.toml .
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/x86_64-unknown-linux-musl/release/bitwarden_rs .
COPY docker/healthcheck.sh ./healthcheck.sh
HEALTHCHECK --interval=30s --timeout=3s CMD sh healthcheck.sh || exit 1
# Configures the startup!
WORKDIR /
CMD ["/bitwarden_rs"]
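Since the build stage targets x86_64-unknown-linux-musl, the binary is built against musl and runs on plain Alpine; the runtime stage still installs openssl and mariadb-connector-c for anything loaded dynamically. To see what the shipped binary actually links (image tag illustrative):

docker run --rm --entrypoint ldd bitwarden_rs:alpine /bitwarden_rs

A fully static binary reports "not a dynamic executable" here; any dynamically linked libraries are listed instead.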

View File

@@ -0,0 +1,105 @@
# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
FROM alpine:3.10 as vault
ENV VAULT_VERSION "v2.12.0b"
ENV URL "https://github.com/dani-garcia/bw_web_builds/releases/download/$VAULT_VERSION/bw_web_$VAULT_VERSION.tar.gz"
RUN apk add --no-cache --upgrade \
curl \
tar
RUN mkdir /web-vault
WORKDIR /web-vault
SHELL ["/bin/ash", "-eo", "pipefail", "-c"]
RUN curl -L $URL | tar xz
RUN ls
########################## BUILD IMAGE ##########################
# We need to use the Rust build image, because
# we need the Rust compiler and Cargo tooling
FROM rust:1.38 as build
# set postgresql backend
ARG DB=postgresql
# Using bundled SQLite, no need to install it
# RUN apt-get update && apt-get install -y\
# --no-install-recommends \
# sqlite3\
# && rm -rf /var/lib/apt/lists/*
# Install PostgreSQL package
RUN apt-get update && apt-get install -y \
--no-install-recommends \
libpq-dev \
&& rm -rf /var/lib/apt/lists/*
# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin app
WORKDIR /app
# Copies over *only* your manifests and build files
COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs
# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN cargo build --features ${DB} --release
RUN find . -not -path "./target*" -delete
# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .
# Make sure that we actually build the project
RUN touch src/main.rs
# Builds again, this time it'll just be
# your actual source files being built
RUN cargo build --features ${DB} --release
######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM debian:buster-slim
ENV ROCKET_ENV "staging"
ENV ROCKET_PORT=80
ENV ROCKET_WORKERS=10
# Install needed libraries
RUN apt-get update && apt-get install -y \
--no-install-recommends \
openssl \
ca-certificates \
curl \
sqlite3 \
libpq5 \
&& rm -rf /var/lib/apt/lists/*
RUN mkdir /data
VOLUME /data
EXPOSE 80
EXPOSE 3012
# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
COPY Rocket.toml .
COPY --from=vault /web-vault ./web-vault
COPY --from=build app/target/release/bitwarden_rs .
COPY docker/healthcheck.sh ./healthcheck.sh
HEALTHCHECK --interval=30s --timeout=3s CMD sh healthcheck.sh || exit 1
# Configures the startup!
WORKDIR /
CMD ["/bitwarden_rs"]

View File

@@ -0,0 +1,87 @@
# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
FROM alpine:3.10 as vault
ENV VAULT_VERSION "v2.12.0b"
ENV URL "https://github.com/dani-garcia/bw_web_builds/releases/download/$VAULT_VERSION/bw_web_$VAULT_VERSION.tar.gz"
RUN apk add --no-cache --upgrade \
curl \
tar
RUN mkdir /web-vault
WORKDIR /web-vault
SHELL ["/bin/ash", "-eo", "pipefail", "-c"]
RUN curl -L $URL | tar xz
RUN ls
########################## BUILD IMAGE ##########################
# Musl build image for statically compiled binary
FROM clux/muslrust:nightly-2019-10-19 as build
# set postgresql backend
ARG DB=postgresql
ENV USER "root"
# Install needed libraries
RUN apt-get update && apt-get install -y \
--no-install-recommends \
libpq-dev \
&& rm -rf /var/lib/apt/lists/*
WORKDIR /app
# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .
RUN rustup target add x86_64-unknown-linux-musl
# Make sure that we actually build the project
RUN touch src/main.rs
# Build
RUN cargo build --features ${DB} --release
######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM alpine:3.10
ENV ROCKET_ENV "staging"
ENV ROCKET_PORT=80
ENV ROCKET_WORKERS=10
ENV SSL_CERT_DIR=/etc/ssl/certs
# Install needed libraries
RUN apk add --no-cache \
openssl \
postgresql-libs \
curl \
sqlite \
ca-certificates
RUN mkdir /data
VOLUME /data
EXPOSE 80
EXPOSE 3012
# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
COPY Rocket.toml .
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/x86_64-unknown-linux-musl/release/bitwarden_rs .
COPY docker/healthcheck.sh ./healthcheck.sh
HEALTHCHECK --interval=30s --timeout=3s CMD sh healthcheck.sh || exit 1
# Configures the startup!
WORKDIR /
CMD ["/bitwarden_rs"]

View File

@@ -0,0 +1,98 @@
# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
FROM alpine:3.10 as vault
ENV VAULT_VERSION "v2.12.0b"
ENV URL "https://github.com/dani-garcia/bw_web_builds/releases/download/$VAULT_VERSION/bw_web_$VAULT_VERSION.tar.gz"
RUN apk add --no-cache --upgrade \
curl \
tar
RUN mkdir /web-vault
WORKDIR /web-vault
SHELL ["/bin/ash", "-eo", "pipefail", "-c"]
RUN curl -L $URL | tar xz
RUN ls
########################## BUILD IMAGE ##########################
# We need to use the Rust build image, because
# we need the Rust compiler and Cargo tooling
FROM rust:1.38 as build
# set sqlite as default for DB ARG for backward compatibility
ARG DB=sqlite
# Using bundled SQLite, no need to install it
# RUN apt-get update && apt-get install -y\
# --no-install-recommends \
# sqlite3 \
# && rm -rf /var/lib/apt/lists/*
# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin app
WORKDIR /app
# Copies over *only* your manifests and build files
COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs
# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN cargo build --features ${DB} --release
RUN find . -not -path "./target*" -delete
# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .
# Make sure that we actually build the project
RUN touch src/main.rs
# Builds again, this time it'll just be
# your actual source files being built
RUN cargo build --features ${DB} --release
######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM debian:buster-slim
ENV ROCKET_ENV "staging"
ENV ROCKET_PORT=80
ENV ROCKET_WORKERS=10
# Install needed libraries
RUN apt-get update && apt-get install -y \
--no-install-recommends \
openssl \
ca-certificates \
curl \
sqlite3 \
&& rm -rf /var/lib/apt/lists/*
RUN mkdir /data
VOLUME /data
EXPOSE 80
EXPOSE 3012
# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
COPY Rocket.toml .
COPY --from=vault /web-vault ./web-vault
COPY --from=build app/target/release/bitwarden_rs .
COPY docker/healthcheck.sh ./healthcheck.sh
HEALTHCHECK --interval=30s --timeout=3s CMD sh healthcheck.sh || exit 1
# Configures the startup!
WORKDIR /
CMD ["/bitwarden_rs"]

View File

@@ -2,25 +2,30 @@
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
FROM alpine as vault
FROM alpine:3.10 as vault
ENV VAULT_VERSION "v2.8.0d"
ENV VAULT_VERSION "v2.12.0b"
ENV URL "https://github.com/dani-garcia/bw_web_builds/releases/download/$VAULT_VERSION/bw_web_$VAULT_VERSION.tar.gz"
RUN apk add --update-cache --upgrade \
RUN apk add --no-cache --upgrade \
curl \
tar
RUN mkdir /web-vault
WORKDIR /web-vault
SHELL ["/bin/ash", "-eo", "pipefail", "-c"]
RUN curl -L $URL | tar xz
RUN ls
########################## BUILD IMAGE ##########################
# Musl build image for statically compiled binary
FROM clux/muslrust:nightly-2018-12-01 as build
FROM clux/muslrust:nightly-2019-10-19 as build
# set sqlite as default for DB ARG for backward compatibility
ARG DB=sqlite
ENV USER "root"
@@ -32,13 +37,16 @@ COPY . .
RUN rustup target add x86_64-unknown-linux-musl
# Make sure that we actually build the project
RUN touch src/main.rs
# Build
RUN cargo build --release
RUN cargo build --features ${DB} --release
######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM alpine:3.8
FROM alpine:3.10
ENV ROCKET_ENV "staging"
ENV ROCKET_PORT=80
@@ -46,10 +54,11 @@ ENV ROCKET_WORKERS=10
ENV SSL_CERT_DIR=/etc/ssl/certs
# Install needed libraries
RUN apk add \
openssl\
ca-certificates \
&& rm /var/cache/apk/*
RUN apk add --no-cache \
openssl \
curl \
sqlite \
ca-certificates
RUN mkdir /data
VOLUME /data
@@ -62,5 +71,11 @@ COPY Rocket.toml .
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/x86_64-unknown-linux-musl/release/bitwarden_rs .
COPY docker/healthcheck.sh ./healthcheck.sh
HEALTHCHECK --interval=30s --timeout=3s CMD sh healthcheck.sh || exit 1
# Configures the startup!
CMD ./bitwarden_rs
WORKDIR /
CMD ["/bitwarden_rs"]

View File

@@ -0,0 +1,107 @@
# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
FROM alpine:3.10 as vault
ENV VAULT_VERSION "v2.12.0b"
ENV URL "https://github.com/dani-garcia/bw_web_builds/releases/download/$VAULT_VERSION/bw_web_$VAULT_VERSION.tar.gz"
RUN apk add --no-cache --upgrade \
curl \
tar
RUN mkdir /web-vault
WORKDIR /web-vault
SHELL ["/bin/ash", "-eo", "pipefail", "-c"]
RUN curl -L $URL | tar xz
RUN ls
########################## BUILD IMAGE ##########################
# We need to use the Rust build image, because
# we need the Rust compiler and Cargo tooling
FROM rust:1.38 as build
# set mysql backend
ARG DB=mysql
RUN apt-get update \
&& apt-get install -y \
--no-install-recommends \
gcc-arm-linux-gnueabi \
&& mkdir -p ~/.cargo \
&& echo '[target.arm-unknown-linux-gnueabi]' >> ~/.cargo/config \
&& echo 'linker = "arm-linux-gnueabi-gcc"' >> ~/.cargo/config
ENV CARGO_HOME "/root/.cargo"
ENV USER "root"
WORKDIR /app
# Prepare openssl armel libs
RUN sed 's/^deb/deb-src/' /etc/apt/sources.list > \
/etc/apt/sources.list.d/deb-src.list \
&& dpkg --add-architecture armel \
&& apt-get update \
&& apt-get install -y \
--no-install-recommends \
libssl-dev:armel \
libc6-dev:armel \
libmariadb-dev:armel
ENV CC_arm_unknown_linux_gnueabi="/usr/bin/arm-linux-gnueabi-gcc"
ENV CROSS_COMPILE="1"
ENV OPENSSL_INCLUDE_DIR="/usr/include/arm-linux-gnueabi"
ENV OPENSSL_LIB_DIR="/usr/lib/arm-linux-gnueabi"
# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .
# Build
RUN rustup target add arm-unknown-linux-gnueabi
RUN cargo build --features ${DB} --release --target=arm-unknown-linux-gnueabi -v
######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM balenalib/rpi-debian:buster
ENV ROCKET_ENV "staging"
ENV ROCKET_PORT=80
ENV ROCKET_WORKERS=10
RUN [ "cross-build-start" ]
# Install needed libraries
RUN apt-get update && apt-get install -y \
--no-install-recommends \
openssl \
ca-certificates \
curl \
libmariadbclient-dev \
&& rm -rf /var/lib/apt/lists/*
RUN mkdir /data
RUN [ "cross-build-end" ]
VOLUME /data
EXPOSE 80
# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
COPY Rocket.toml .
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/arm-unknown-linux-gnueabi/release/bitwarden_rs .
COPY docker/healthcheck.sh ./healthcheck.sh
HEALTHCHECK --interval=30s --timeout=3s CMD sh healthcheck.sh || exit 1
# Configures the startup!
WORKDIR /
CMD ["/bitwarden_rs"]

View File

@@ -0,0 +1,106 @@
# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
FROM alpine:3.10 as vault
ENV VAULT_VERSION "v2.12.0b"
ENV URL "https://github.com/dani-garcia/bw_web_builds/releases/download/$VAULT_VERSION/bw_web_$VAULT_VERSION.tar.gz"
RUN apk add --no-cache --upgrade \
curl \
tar
RUN mkdir /web-vault
WORKDIR /web-vault
SHELL ["/bin/ash", "-eo", "pipefail", "-c"]
RUN curl -L $URL | tar xz
RUN ls
########################## BUILD IMAGE ##########################
# We need to use the Rust build image, because
# we need the Rust compiler and Cargo tooling
FROM rust:1.38 as build
# set sqlite as default for DB ARG for backward compatibility
ARG DB=sqlite
RUN apt-get update \
&& apt-get install -y \
--no-install-recommends \
gcc-arm-linux-gnueabi \
&& mkdir -p ~/.cargo \
&& echo '[target.arm-unknown-linux-gnueabi]' >> ~/.cargo/config \
&& echo 'linker = "arm-linux-gnueabi-gcc"' >> ~/.cargo/config
ENV CARGO_HOME "/root/.cargo"
ENV USER "root"
WORKDIR /app
# Prepare openssl armel libs
RUN sed 's/^deb/deb-src/' /etc/apt/sources.list > \
/etc/apt/sources.list.d/deb-src.list \
&& dpkg --add-architecture armel \
&& apt-get update \
&& apt-get install -y \
--no-install-recommends \
libssl-dev:armel \
libc6-dev:armel
ENV CC_arm_unknown_linux_gnueabi="/usr/bin/arm-linux-gnueabi-gcc"
ENV CROSS_COMPILE="1"
ENV OPENSSL_INCLUDE_DIR="/usr/include/arm-linux-gnueabi"
ENV OPENSSL_LIB_DIR="/usr/lib/arm-linux-gnueabi"
# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .
# Build
RUN rustup target add arm-unknown-linux-gnueabi
RUN cargo build --features ${DB} --release --target=arm-unknown-linux-gnueabi -v
######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM balenalib/rpi-debian:buster
ENV ROCKET_ENV "staging"
ENV ROCKET_PORT=80
ENV ROCKET_WORKERS=10
RUN [ "cross-build-start" ]
# Install needed libraries
RUN apt-get update && apt-get install -y \
--no-install-recommends \
openssl \
ca-certificates \
curl \
sqlite3 \
&& rm -rf /var/lib/apt/lists/*
RUN mkdir /data
RUN [ "cross-build-end" ]
VOLUME /data
EXPOSE 80
# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
COPY Rocket.toml .
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/arm-unknown-linux-gnueabi/release/bitwarden_rs .
COPY docker/healthcheck.sh ./healthcheck.sh
HEALTHCHECK --interval=30s --timeout=3s CMD sh healthcheck.sh || exit 1
# Configures the startup!
WORKDIR /
CMD ["/bitwarden_rs"]

View File

@@ -0,0 +1,108 @@
# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
FROM alpine:3.10 as vault
ENV VAULT_VERSION "v2.12.0b"
ENV URL "https://github.com/dani-garcia/bw_web_builds/releases/download/$VAULT_VERSION/bw_web_$VAULT_VERSION.tar.gz"
RUN apk add --no-cache --upgrade \
curl \
tar
RUN mkdir /web-vault
WORKDIR /web-vault
SHELL ["/bin/ash", "-eo", "pipefail", "-c"]
RUN curl -L $URL | tar xz
RUN ls
########################## BUILD IMAGE ##########################
# We need to use the Rust build image, because
# we need the Rust compiler and Cargo tooling
FROM rust:1.38 as build
# set mysql backend
ARG DB=mysql
RUN apt-get update \
&& apt-get install -y \
--no-install-recommends \
gcc-arm-linux-gnueabihf \
&& mkdir -p ~/.cargo \
&& echo '[target.armv7-unknown-linux-gnueabihf]' >> ~/.cargo/config \
&& echo 'linker = "arm-linux-gnueabihf-gcc"' >> ~/.cargo/config
ENV CARGO_HOME "/root/.cargo"
ENV USER "root"
WORKDIR /app
# Prepare openssl armhf libs
RUN sed 's/^deb/deb-src/' /etc/apt/sources.list > \
/etc/apt/sources.list.d/deb-src.list \
&& dpkg --add-architecture armhf \
&& apt-get update \
&& apt-get install -y \
--no-install-recommends \
libssl-dev:armhf \
libc6-dev:armhf \
libmariadb-dev:armhf
ENV CC_armv7_unknown_linux_gnueabihf="/usr/bin/arm-linux-gnueabihf-gcc"
ENV CROSS_COMPILE="1"
ENV OPENSSL_INCLUDE_DIR="/usr/include/arm-linux-gnueabihf"
ENV OPENSSL_LIB_DIR="/usr/lib/arm-linux-gnueabihf"
# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .
# Build
RUN rustup target add armv7-unknown-linux-gnueabihf
RUN cargo build --features ${DB} --release --target=armv7-unknown-linux-gnueabihf -v
######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM balenalib/armv7hf-debian:buster
ENV ROCKET_ENV "staging"
ENV ROCKET_PORT=80
ENV ROCKET_WORKERS=10
RUN [ "cross-build-start" ]
# Install needed libraries
RUN apt-get update && apt-get install -y \
--no-install-recommends \
openssl \
ca-certificates \
curl \
libmariadbclient-dev \
&& rm -rf /var/lib/apt/lists/*
RUN mkdir /data
RUN [ "cross-build-end" ]
VOLUME /data
EXPOSE 80
# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
COPY Rocket.toml .
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/armv7-unknown-linux-gnueabihf/release/bitwarden_rs .
COPY docker/healthcheck.sh ./healthcheck.sh
HEALTHCHECK --interval=30s --timeout=3s CMD sh healthcheck.sh || exit 1
# Configures the startup!
WORKDIR /
CMD ["/bitwarden_rs"]

View File

@@ -2,29 +2,35 @@
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
FROM alpine as vault
FROM alpine:3.10 as vault
ENV VAULT_VERSION "v2.8.0d"
ENV VAULT_VERSION "v2.12.0b"
ENV URL "https://github.com/dani-garcia/bw_web_builds/releases/download/$VAULT_VERSION/bw_web_$VAULT_VERSION.tar.gz"
RUN apk add --update-cache --upgrade \
RUN apk add --no-cache --upgrade \
curl \
tar
RUN mkdir /web-vault
WORKDIR /web-vault
SHELL ["/bin/ash", "-eo", "pipefail", "-c"]
RUN curl -L $URL | tar xz
RUN ls
########################## BUILD IMAGE ##########################
# We need to use the Rust build image, because
# we need the Rust compiler and Cargo tooling
FROM rust as build
FROM rust:1.38 as build
# set sqlite as default for DB ARG for backward compatibility
ARG DB=sqlite
RUN apt-get update \
&& apt-get install -y \
--no-install-recommends \
gcc-arm-linux-gnueabihf \
&& mkdir -p ~/.cargo \
&& echo '[target.armv7-unknown-linux-gnueabihf]' >> ~/.cargo/config \
@@ -41,6 +47,7 @@ RUN sed 's/^deb/deb-src/' /etc/apt/sources.list > \
&& dpkg --add-architecture armhf \
&& apt-get update \
&& apt-get install -y \
--no-install-recommends \
libssl-dev:armhf \
libc6-dev:armhf
@@ -55,12 +62,12 @@ COPY . .
# Build
RUN rustup target add armv7-unknown-linux-gnueabihf
RUN cargo build --release --target=armv7-unknown-linux-gnueabihf -v
RUN cargo build --features ${DB} --release --target=armv7-unknown-linux-gnueabihf -v
######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM balenalib/armv7hf-debian:stretch
FROM balenalib/armv7hf-debian:buster
ENV ROCKET_ENV "staging"
ENV ROCKET_PORT=80
@@ -69,11 +76,13 @@ ENV ROCKET_WORKERS=10
RUN [ "cross-build-start" ]
# Install needed libraries
RUN apt-get update && apt-get install -y\
openssl\
ca-certificates\
--no-install-recommends\
&& rm -rf /var/lib/apt/lists/*
RUN apt-get update && apt-get install -y \
--no-install-recommends \
openssl \
ca-certificates \
curl \
sqlite3 \
&& rm -rf /var/lib/apt/lists/*
RUN mkdir /data
@@ -88,5 +97,10 @@ COPY Rocket.toml .
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/armv7-unknown-linux-gnueabihf/release/bitwarden_rs .
COPY docker/healthcheck.sh ./healthcheck.sh
HEALTHCHECK --interval=30s --timeout=3s CMD sh healthcheck.sh || exit 1
# Configures the startup!
CMD ./bitwarden_rs
WORKDIR /
CMD ["/bitwarden_rs"]

docker/healthcheck.sh Normal file
View File

@@ -0,0 +1,8 @@
#!/usr/bin/env sh
if [ -z "$ROCKET_TLS"]
then
curl --fail http://localhost:${ROCKET_PORT:-"80"}/alive || exit 1
else
curl --insecure --fail https://localhost:${ROCKET_PORT:-"80"}/alive || exit 1
fi
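The HEALTHCHECK instructions added to each Dockerfile run this script every 30 seconds. curl (now installed in every runtime stage) probes the /alive endpoint over plain HTTP, or over HTTPS with certificate verification disabled when ROCKET_TLS is set. The recorded state can be queried with (container name illustrative):

docker inspect --format '{{.State.Health.Status}}' bitwarden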

View File

@@ -0,0 +1,62 @@
CREATE TABLE users (
uuid CHAR(36) NOT NULL PRIMARY KEY,
created_at DATETIME NOT NULL,
updated_at DATETIME NOT NULL,
email VARCHAR(255) NOT NULL UNIQUE,
name TEXT NOT NULL,
password_hash BLOB NOT NULL,
salt BLOB NOT NULL,
password_iterations INTEGER NOT NULL,
password_hint TEXT,
`key` TEXT NOT NULL,
private_key TEXT,
public_key TEXT,
totp_secret TEXT,
totp_recover TEXT,
security_stamp TEXT NOT NULL,
equivalent_domains TEXT NOT NULL,
excluded_globals TEXT NOT NULL
);
CREATE TABLE devices (
uuid CHAR(36) NOT NULL PRIMARY KEY,
created_at DATETIME NOT NULL,
updated_at DATETIME NOT NULL,
user_uuid CHAR(36) NOT NULL REFERENCES users (uuid),
name TEXT NOT NULL,
type INTEGER NOT NULL,
push_token TEXT,
refresh_token TEXT NOT NULL
);
CREATE TABLE ciphers (
uuid CHAR(36) NOT NULL PRIMARY KEY,
created_at DATETIME NOT NULL,
updated_at DATETIME NOT NULL,
user_uuid CHAR(36) NOT NULL REFERENCES users (uuid),
folder_uuid CHAR(36) REFERENCES folders (uuid),
organization_uuid CHAR(36),
type INTEGER NOT NULL,
name TEXT NOT NULL,
notes TEXT,
fields TEXT,
data TEXT NOT NULL,
favorite BOOLEAN NOT NULL
);
CREATE TABLE attachments (
id CHAR(36) NOT NULL PRIMARY KEY,
cipher_uuid CHAR(36) NOT NULL REFERENCES ciphers (uuid),
file_name TEXT NOT NULL,
file_size INTEGER NOT NULL
);
CREATE TABLE folders (
uuid CHAR(36) NOT NULL PRIMARY KEY,
created_at DATETIME NOT NULL,
updated_at DATETIME NOT NULL,
user_uuid CHAR(36) NOT NULL REFERENCES users (uuid),
name TEXT NOT NULL
);

View File

@@ -0,0 +1,30 @@
CREATE TABLE collections (
uuid VARCHAR(40) NOT NULL PRIMARY KEY,
org_uuid VARCHAR(40) NOT NULL REFERENCES organizations (uuid),
name TEXT NOT NULL
);
CREATE TABLE organizations (
uuid VARCHAR(40) NOT NULL PRIMARY KEY,
name TEXT NOT NULL,
billing_email TEXT NOT NULL
);
CREATE TABLE users_collections (
user_uuid CHAR(36) NOT NULL REFERENCES users (uuid),
collection_uuid CHAR(36) NOT NULL REFERENCES collections (uuid),
PRIMARY KEY (user_uuid, collection_uuid)
);
CREATE TABLE users_organizations (
uuid CHAR(36) NOT NULL PRIMARY KEY,
user_uuid CHAR(36) NOT NULL REFERENCES users (uuid),
org_uuid CHAR(36) NOT NULL REFERENCES organizations (uuid),
access_all BOOLEAN NOT NULL,
`key` TEXT NOT NULL,
status INTEGER NOT NULL,
type INTEGER NOT NULL,
UNIQUE (user_uuid, org_uuid)
);

View File

@@ -0,0 +1,34 @@
ALTER TABLE ciphers RENAME TO oldCiphers;
CREATE TABLE ciphers (
uuid CHAR(36) NOT NULL PRIMARY KEY,
created_at DATETIME NOT NULL,
updated_at DATETIME NOT NULL,
user_uuid CHAR(36) REFERENCES users (uuid), -- Make this optional
organization_uuid CHAR(36) REFERENCES organizations (uuid), -- Add reference to orgs table
-- Remove folder_uuid
type INTEGER NOT NULL,
name TEXT NOT NULL,
notes TEXT,
fields TEXT,
data TEXT NOT NULL,
favorite BOOLEAN NOT NULL
);
CREATE TABLE folders_ciphers (
cipher_uuid CHAR(36) NOT NULL REFERENCES ciphers (uuid),
folder_uuid CHAR(36) NOT NULL REFERENCES folders (uuid),
PRIMARY KEY (cipher_uuid, folder_uuid)
);
INSERT INTO ciphers (uuid, created_at, updated_at, user_uuid, organization_uuid, type, name, notes, fields, data, favorite)
SELECT uuid, created_at, updated_at, user_uuid, organization_uuid, type, name, notes, fields, data, favorite FROM oldCiphers;
INSERT INTO folders_ciphers (cipher_uuid, folder_uuid)
SELECT uuid, folder_uuid FROM oldCiphers WHERE folder_uuid IS NOT NULL;
DROP TABLE oldCiphers;
ALTER TABLE users_collections ADD COLUMN read_only BOOLEAN NOT NULL DEFAULT 0; -- False

View File

@@ -0,0 +1,5 @@
CREATE TABLE ciphers_collections (
cipher_uuid CHAR(36) NOT NULL REFERENCES ciphers (uuid),
collection_uuid CHAR(36) NOT NULL REFERENCES collections (uuid),
PRIMARY KEY (cipher_uuid, collection_uuid)
);

View File

@@ -0,0 +1,14 @@
ALTER TABLE attachments RENAME TO oldAttachments;
CREATE TABLE attachments (
id CHAR(36) NOT NULL PRIMARY KEY,
cipher_uuid CHAR(36) NOT NULL REFERENCES ciphers (uuid),
file_name TEXT NOT NULL,
file_size INTEGER NOT NULL
);
INSERT INTO attachments (id, cipher_uuid, file_name, file_size)
SELECT id, cipher_uuid, file_name, file_size FROM oldAttachments;
DROP TABLE oldAttachments;

View File

@@ -0,0 +1,15 @@
CREATE TABLE twofactor (
uuid CHAR(36) NOT NULL PRIMARY KEY,
user_uuid CHAR(36) NOT NULL REFERENCES users (uuid),
type INTEGER NOT NULL,
enabled BOOLEAN NOT NULL,
data TEXT NOT NULL,
UNIQUE (user_uuid, type)
);
INSERT INTO twofactor (uuid, user_uuid, type, enabled, data)
SELECT UUID(), uuid, 0, 1, u.totp_secret FROM users u where u.totp_secret IS NOT NULL;
UPDATE users SET totp_secret = NULL; -- Instead of recreating the table, just leave the columns empty
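This migration moves each user's TOTP secret into a twofactor row (type 0, the authenticator entry) and then blanks the old users.totp_secret column instead of rebuilding the table. A post-migration sanity check could look like this (database name illustrative):

mysql bitwarden -e "SELECT COUNT(*) FROM users WHERE totp_secret IS NOT NULL;"  # expect 0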

View File

@@ -0,0 +1,3 @@
CREATE TABLE invitations (
email VARCHAR(255) NOT NULL PRIMARY KEY
);

View File

@@ -4,4 +4,4 @@ ALTER TABLE users
ALTER TABLE users
ADD COLUMN
client_kdf_iter INTEGER NOT NULL DEFAULT 5000;
client_kdf_iter INTEGER NOT NULL DEFAULT 100000;

View File

@@ -0,0 +1,3 @@
ALTER TABLE attachments
ADD COLUMN
`key` TEXT;

View File

@@ -0,0 +1,7 @@
ALTER TABLE attachments CHANGE COLUMN akey `key` TEXT;
ALTER TABLE ciphers CHANGE COLUMN atype type INTEGER NOT NULL;
ALTER TABLE devices CHANGE COLUMN atype type INTEGER NOT NULL;
ALTER TABLE twofactor CHANGE COLUMN atype type INTEGER NOT NULL;
ALTER TABLE users CHANGE COLUMN akey `key` TEXT;
ALTER TABLE users_organizations CHANGE COLUMN akey `key` TEXT;
ALTER TABLE users_organizations CHANGE COLUMN atype type INTEGER NOT NULL;

View File

@@ -0,0 +1,7 @@
ALTER TABLE attachments CHANGE COLUMN `key` akey TEXT;
ALTER TABLE ciphers CHANGE COLUMN type atype INTEGER NOT NULL;
ALTER TABLE devices CHANGE COLUMN type atype INTEGER NOT NULL;
ALTER TABLE twofactor CHANGE COLUMN type atype INTEGER NOT NULL;
ALTER TABLE users CHANGE COLUMN `key` akey TEXT;
ALTER TABLE users_organizations CHANGE COLUMN `key` akey TEXT;
ALTER TABLE users_organizations CHANGE COLUMN type atype INTEGER NOT NULL;

View File

@@ -0,0 +1 @@
ALTER TABLE twofactor ADD COLUMN last_used INTEGER NOT NULL DEFAULT 0;

View File

@@ -0,0 +1,13 @@
DROP TABLE devices;
DROP TABLE attachments;
DROP TABLE users_collections;
DROP TABLE users_organizations;
DROP TABLE folders_ciphers;
DROP TABLE ciphers_collections;
DROP TABLE twofactor;
DROP TABLE invitations;
DROP TABLE collections;
DROP TABLE folders;
DROP TABLE ciphers;
DROP TABLE users;
DROP TABLE organizations;

View File

@@ -0,0 +1,121 @@
CREATE TABLE users (
uuid CHAR(36) NOT NULL PRIMARY KEY,
created_at TIMESTAMP NOT NULL,
updated_at TIMESTAMP NOT NULL,
email VARCHAR(255) NOT NULL UNIQUE,
name TEXT NOT NULL,
password_hash BYTEA NOT NULL,
salt BYTEA NOT NULL,
password_iterations INTEGER NOT NULL,
password_hint TEXT,
akey TEXT NOT NULL,
private_key TEXT,
public_key TEXT,
totp_secret TEXT,
totp_recover TEXT,
security_stamp TEXT NOT NULL,
equivalent_domains TEXT NOT NULL,
excluded_globals TEXT NOT NULL,
client_kdf_type INTEGER NOT NULL DEFAULT 0,
client_kdf_iter INTEGER NOT NULL DEFAULT 100000
);
CREATE TABLE devices (
uuid CHAR(36) NOT NULL PRIMARY KEY,
created_at TIMESTAMP NOT NULL,
updated_at TIMESTAMP NOT NULL,
user_uuid CHAR(36) NOT NULL REFERENCES users (uuid),
name TEXT NOT NULL,
atype INTEGER NOT NULL,
push_token TEXT,
refresh_token TEXT NOT NULL,
twofactor_remember TEXT
);
CREATE TABLE organizations (
uuid VARCHAR(40) NOT NULL PRIMARY KEY,
name TEXT NOT NULL,
billing_email TEXT NOT NULL
);
CREATE TABLE ciphers (
uuid CHAR(36) NOT NULL PRIMARY KEY,
created_at TIMESTAMP NOT NULL,
updated_at TIMESTAMP NOT NULL,
user_uuid CHAR(36) REFERENCES users (uuid),
organization_uuid CHAR(36) REFERENCES organizations (uuid),
atype INTEGER NOT NULL,
name TEXT NOT NULL,
notes TEXT,
fields TEXT,
data TEXT NOT NULL,
favorite BOOLEAN NOT NULL,
password_history TEXT
);
CREATE TABLE attachments (
id CHAR(36) NOT NULL PRIMARY KEY,
cipher_uuid CHAR(36) NOT NULL REFERENCES ciphers (uuid),
file_name TEXT NOT NULL,
file_size INTEGER NOT NULL,
akey TEXT
);
CREATE TABLE folders (
uuid CHAR(36) NOT NULL PRIMARY KEY,
created_at TIMESTAMP NOT NULL,
updated_at TIMESTAMP NOT NULL,
user_uuid CHAR(36) NOT NULL REFERENCES users (uuid),
name TEXT NOT NULL
);
CREATE TABLE collections (
uuid VARCHAR(40) NOT NULL PRIMARY KEY,
org_uuid VARCHAR(40) NOT NULL REFERENCES organizations (uuid),
name TEXT NOT NULL
);
CREATE TABLE users_collections (
user_uuid CHAR(36) NOT NULL REFERENCES users (uuid),
collection_uuid CHAR(36) NOT NULL REFERENCES collections (uuid),
read_only BOOLEAN NOT NULL DEFAULT false,
PRIMARY KEY (user_uuid, collection_uuid)
);
CREATE TABLE users_organizations (
uuid CHAR(36) NOT NULL PRIMARY KEY,
user_uuid CHAR(36) NOT NULL REFERENCES users (uuid),
org_uuid CHAR(36) NOT NULL REFERENCES organizations (uuid),
access_all BOOLEAN NOT NULL,
akey TEXT NOT NULL,
status INTEGER NOT NULL,
atype INTEGER NOT NULL,
UNIQUE (user_uuid, org_uuid)
);
CREATE TABLE folders_ciphers (
cipher_uuid CHAR(36) NOT NULL REFERENCES ciphers (uuid),
folder_uuid CHAR(36) NOT NULL REFERENCES folders (uuid),
PRIMARY KEY (cipher_uuid, folder_uuid)
);
CREATE TABLE ciphers_collections (
cipher_uuid CHAR(36) NOT NULL REFERENCES ciphers (uuid),
collection_uuid CHAR(36) NOT NULL REFERENCES collections (uuid),
PRIMARY KEY (cipher_uuid, collection_uuid)
);
CREATE TABLE twofactor (
uuid CHAR(36) NOT NULL PRIMARY KEY,
user_uuid CHAR(36) NOT NULL REFERENCES users (uuid),
atype INTEGER NOT NULL,
enabled BOOLEAN NOT NULL,
data TEXT NOT NULL,
UNIQUE (user_uuid, atype)
);
CREATE TABLE invitations (
email VARCHAR(255) NOT NULL PRIMARY KEY
);

View File

@@ -0,0 +1,26 @@
ALTER TABLE attachments ALTER COLUMN id TYPE CHAR(36);
ALTER TABLE attachments ALTER COLUMN cipher_uuid TYPE CHAR(36);
ALTER TABLE users ALTER COLUMN uuid TYPE CHAR(36);
ALTER TABLE users ALTER COLUMN email TYPE VARCHAR(255);
ALTER TABLE devices ALTER COLUMN uuid TYPE CHAR(36);
ALTER TABLE devices ALTER COLUMN user_uuid TYPE CHAR(36);
ALTER TABLE organizations ALTER COLUMN uuid TYPE CHAR(40);
ALTER TABLE ciphers ALTER COLUMN uuid TYPE CHAR(36);
ALTER TABLE ciphers ALTER COLUMN user_uuid TYPE CHAR(36);
ALTER TABLE ciphers ALTER COLUMN organization_uuid TYPE CHAR(36);
ALTER TABLE folders ALTER COLUMN uuid TYPE CHAR(36);
ALTER TABLE folders ALTER COLUMN user_uuid TYPE CHAR(36);
ALTER TABLE collections ALTER COLUMN uuid TYPE CHAR(40);
ALTER TABLE collections ALTER COLUMN org_uuid TYPE CHAR(40);
ALTER TABLE users_collections ALTER COLUMN user_uuid TYPE CHAR(36);
ALTER TABLE users_collections ALTER COLUMN collection_uuid TYPE CHAR(36);
ALTER TABLE users_organizations ALTER COLUMN uuid TYPE CHAR(36);
ALTER TABLE users_organizations ALTER COLUMN user_uuid TYPE CHAR(36);
ALTER TABLE users_organizations ALTER COLUMN org_uuid TYPE CHAR(36);
ALTER TABLE folders_ciphers ALTER COLUMN cipher_uuid TYPE CHAR(36);
ALTER TABLE folders_ciphers ALTER COLUMN folder_uuid TYPE CHAR(36);
ALTER TABLE ciphers_collections ALTER COLUMN cipher_uuid TYPE CHAR(36);
ALTER TABLE ciphers_collections ALTER COLUMN collection_uuid TYPE CHAR(36);
ALTER TABLE twofactor ALTER COLUMN uuid TYPE CHAR(36);
ALTER TABLE twofactor ALTER COLUMN user_uuid TYPE CHAR(36);
ALTER TABLE invitations ALTER COLUMN email TYPE VARCHAR(255);

View File

@@ -0,0 +1,27 @@
-- Switch from CHAR() types to VARCHAR() types to avoid padding issues.
ALTER TABLE attachments ALTER COLUMN id TYPE TEXT;
ALTER TABLE attachments ALTER COLUMN cipher_uuid TYPE VARCHAR(40);
ALTER TABLE users ALTER COLUMN uuid TYPE VARCHAR(40);
ALTER TABLE users ALTER COLUMN email TYPE TEXT;
ALTER TABLE devices ALTER COLUMN uuid TYPE VARCHAR(40);
ALTER TABLE devices ALTER COLUMN user_uuid TYPE VARCHAR(40);
ALTER TABLE organizations ALTER COLUMN uuid TYPE VARCHAR(40);
ALTER TABLE ciphers ALTER COLUMN uuid TYPE VARCHAR(40);
ALTER TABLE ciphers ALTER COLUMN user_uuid TYPE VARCHAR(40);
ALTER TABLE ciphers ALTER COLUMN organization_uuid TYPE VARCHAR(40);
ALTER TABLE folders ALTER COLUMN uuid TYPE VARCHAR(40);
ALTER TABLE folders ALTER COLUMN user_uuid TYPE VARCHAR(40);
ALTER TABLE collections ALTER COLUMN uuid TYPE VARCHAR(40);
ALTER TABLE collections ALTER COLUMN org_uuid TYPE VARCHAR(40);
ALTER TABLE users_collections ALTER COLUMN user_uuid TYPE VARCHAR(40);
ALTER TABLE users_collections ALTER COLUMN collection_uuid TYPE VARCHAR(40);
ALTER TABLE users_organizations ALTER COLUMN uuid TYPE VARCHAR(40);
ALTER TABLE users_organizations ALTER COLUMN user_uuid TYPE VARCHAR(40);
ALTER TABLE users_organizations ALTER COLUMN org_uuid TYPE VARCHAR(40);
ALTER TABLE folders_ciphers ALTER COLUMN cipher_uuid TYPE VARCHAR(40);
ALTER TABLE folders_ciphers ALTER COLUMN folder_uuid TYPE VARCHAR(40);
ALTER TABLE ciphers_collections ALTER COLUMN cipher_uuid TYPE VARCHAR(40);
ALTER TABLE ciphers_collections ALTER COLUMN collection_uuid TYPE VARCHAR(40);
ALTER TABLE twofactor ALTER COLUMN uuid TYPE VARCHAR(40);
ALTER TABLE twofactor ALTER COLUMN user_uuid TYPE VARCHAR(40);
ALTER TABLE invitations ALTER COLUMN email TYPE TEXT;
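The padding issue the comment refers to: PostgreSQL pads CHAR(n) values with trailing spaces up to n, while VARCHAR stores only the characters written, so comparisons and concatenations against CHAR columns can pick up stray spaces. The difference is easy to see:

psql -c "SELECT octet_length('abc'::char(36)) AS padded, octet_length('abc'::varchar(36)) AS plain;"

which returns 36 for the CHAR cast and 3 for the VARCHAR cast.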

View File

@@ -0,0 +1 @@
ALTER TABLE twofactor ADD COLUMN last_used INTEGER NOT NULL DEFAULT 0;

View File

@@ -0,0 +1,9 @@
DROP TABLE users;
DROP TABLE devices;
DROP TABLE ciphers;
DROP TABLE attachments;
DROP TABLE folders;

View File

@@ -0,0 +1,8 @@
DROP TABLE collections;
DROP TABLE organizations;
DROP TABLE users_collections;
DROP TABLE users_organizations;

View File

@@ -0,0 +1 @@
DROP TABLE ciphers_collections;

View File

@@ -0,0 +1 @@
-- This file should undo anything in `up.sql`

View File

@@ -0,0 +1,3 @@
ALTER TABLE devices
ADD COLUMN
twofactor_remember TEXT;

View File

@@ -0,0 +1,8 @@
UPDATE users
SET totp_secret = (
SELECT twofactor.data FROM twofactor
WHERE twofactor.type = 0
AND twofactor.user_uuid = users.uuid
);
DROP TABLE twofactor;

View File

@@ -0,0 +1,3 @@
ALTER TABLE ciphers
ADD COLUMN
password_history TEXT;

View File

@@ -0,0 +1 @@
DROP TABLE invitations;

View File

@@ -0,0 +1,7 @@
ALTER TABLE users
ADD COLUMN
client_kdf_type INTEGER NOT NULL DEFAULT 0; -- PBKDF2
ALTER TABLE users
ADD COLUMN
client_kdf_iter INTEGER NOT NULL DEFAULT 100000;

View File

@@ -0,0 +1,7 @@
ALTER TABLE attachments RENAME COLUMN akey TO key;
ALTER TABLE ciphers RENAME COLUMN atype TO type;
ALTER TABLE devices RENAME COLUMN atype TO type;
ALTER TABLE twofactor RENAME COLUMN atype TO type;
ALTER TABLE users RENAME COLUMN akey TO key;
ALTER TABLE users_organizations RENAME COLUMN akey TO key;
ALTER TABLE users_organizations RENAME COLUMN atype TO type;

View File

@@ -0,0 +1,7 @@
ALTER TABLE attachments RENAME COLUMN key TO akey;
ALTER TABLE ciphers RENAME COLUMN type TO atype;
ALTER TABLE devices RENAME COLUMN type TO atype;
ALTER TABLE twofactor RENAME COLUMN type TO atype;
ALTER TABLE users RENAME COLUMN key TO akey;
ALTER TABLE users_organizations RENAME COLUMN key TO akey;
ALTER TABLE users_organizations RENAME COLUMN type TO atype;

View File

@@ -0,0 +1 @@
ALTER TABLE twofactor ADD COLUMN last_used INTEGER NOT NULL DEFAULT 0;

View File

@@ -1 +1 @@
nightly-2019-01-26
nightly-2019-11-17

View File

@@ -1,4 +1,5 @@
use serde_json::Value;
use std::process::Command;
use rocket::http::{Cookie, Cookies, SameSite};
use rocket::request::{self, FlashMessage, Form, FromRequest, Request};
@@ -6,41 +7,55 @@ use rocket::response::{content::Html, Flash, Redirect};
use rocket::{Outcome, Route};
use rocket_contrib::json::Json;
use crate::api::{ApiResult, EmptyResult};
use crate::api::{ApiResult, EmptyResult, JsonResult};
use crate::auth::{decode_admin, encode_jwt, generate_admin_claims, ClientIp};
use crate::config::ConfigBuilder;
use crate::db::{models::*, DbConn};
use crate::db::{backup_database, models::*, DbConn};
use crate::error::Error;
use crate::mail;
use crate::CONFIG;
pub fn routes() -> Vec<Route> {
if CONFIG.admin_token().is_none() {
return Vec::new();
if CONFIG.admin_token().is_none() && !CONFIG.disable_admin_token() {
return routes![admin_disabled];
}
routes![
admin_login,
get_users,
post_admin_login,
admin_page,
invite_user,
delete_user,
deauth_user,
remove_2fa,
update_revision_users,
post_config,
delete_config,
backup_db,
]
}
lazy_static! {
static ref CAN_BACKUP: bool = cfg!(feature = "sqlite") && Command::new("sqlite3").arg("-version").status().is_ok();
}
#[get("/")]
fn admin_disabled() -> &'static str {
"The admin panel is disabled, please configure the 'ADMIN_TOKEN' variable to enable it"
}
const COOKIE_NAME: &str = "BWRS_ADMIN";
const ADMIN_PATH: &str = "/admin";
const BASE_TEMPLATE: &str = "admin/base";
const VERSION: Option<&str> = option_env!("GIT_VERSION");
#[get("/", rank = 2)]
fn admin_login(flash: Option<FlashMessage>) -> ApiResult<Html<String>> {
// If there is an error, show it
let msg = flash.map(|msg| format!("{}: {}", msg.name(), msg.msg()));
let json = json!({"page_content": "admin/login", "error": msg});
let json = json!({"page_content": "admin/login", "version": VERSION, "error": msg});
// Return the page
let text = CONFIG.render_template(BASE_TEMPLATE, &json)?;
@@ -83,23 +98,27 @@ fn post_admin_login(data: Form<LoginForm>, mut cookies: Cookies, ip: ClientIp) -
fn _validate_token(token: &str) -> bool {
match CONFIG.admin_token().as_ref() {
None => false,
Some(t) => t == token,
Some(t) => crate::crypto::ct_eq(t.trim(), token.trim()),
}
}
#[derive(Serialize)]
struct AdminTemplateData {
users: Vec<Value>,
page_content: String,
version: Option<&'static str>,
users: Vec<Value>,
config: Value,
can_backup: bool,
}
impl AdminTemplateData {
fn new(users: Vec<Value>) -> Self {
Self {
users,
page_content: String::from("admin/page"),
version: VERSION,
users,
config: CONFIG.prepare_json(),
can_backup: *CAN_BACKUP,
}
}
@@ -135,17 +154,26 @@ fn invite_user(data: Json<InviteData>, _token: AdminToken, conn: DbConn) -> Empt
err!("Invitations are not allowed")
}
let mut user = User::new(email);
user.save(&conn)?;
if CONFIG.mail_enabled() {
let mut user = User::new(email);
user.save(&conn)?;
let org_name = "bitwarden_rs";
mail::send_invite(&user.email, &user.uuid, None, None, &org_name, None)
} else {
let mut invitation = Invitation::new(data.email);
let invitation = Invitation::new(data.email);
invitation.save(&conn)
}
}
#[get("/users")]
fn get_users(_token: AdminToken, conn: DbConn) -> JsonResult {
let users = User::get_all(&conn);
let users_json: Vec<Value> = users.iter().map(|u| u.to_json(&conn)).collect();
Ok(Json(Value::Array(users_json)))
}
#[post("/users/<uuid>/delete")]
fn delete_user(uuid: String, _token: AdminToken, conn: DbConn) -> EmptyResult {
let user = match User::find_by_uuid(&uuid, &conn) {
@@ -163,11 +191,29 @@ fn deauth_user(uuid: String, _token: AdminToken, conn: DbConn) -> EmptyResult {
None => err!("User doesn't exist"),
};
Device::delete_all_by_user(&user.uuid, &conn)?;
user.reset_security_stamp();
user.save(&conn)
}
#[post("/users/<uuid>/remove-2fa")]
fn remove_2fa(uuid: String, _token: AdminToken, conn: DbConn) -> EmptyResult {
let mut user = match User::find_by_uuid(&uuid, &conn) {
Some(user) => user,
None => err!("User doesn't exist"),
};
TwoFactor::delete_all_by_user(&user.uuid, &conn)?;
user.totp_recover = None;
user.save(&conn)
}
#[post("/users/update_revision")]
fn update_revision_users(_token: AdminToken, conn: DbConn) -> EmptyResult {
User::update_all_revisions(&conn)
}
#[post("/config", data = "<data>")]
fn post_config(data: Json<ConfigBuilder>, _token: AdminToken) -> EmptyResult {
let data: ConfigBuilder = data.into_inner();
@@ -179,31 +225,44 @@ fn delete_config(_token: AdminToken) -> EmptyResult {
CONFIG.delete_user_config()
}
#[post("/config/backup_db")]
fn backup_db(_token: AdminToken) -> EmptyResult {
if *CAN_BACKUP {
backup_database()
} else {
err!("Can't back up current DB (either it's not SQLite or the 'sqlite' binary is not present)");
}
}
pub struct AdminToken {}
impl<'a, 'r> FromRequest<'a, 'r> for AdminToken {
type Error = &'static str;
fn from_request(request: &'a Request<'r>) -> request::Outcome<Self, Self::Error> {
let mut cookies = request.cookies();
if CONFIG.disable_admin_token() {
Outcome::Success(AdminToken {})
} else {
let mut cookies = request.cookies();
let access_token = match cookies.get(COOKIE_NAME) {
Some(cookie) => cookie.value(),
None => return Outcome::Forward(()), // If there is no cookie, redirect to login
};
let access_token = match cookies.get(COOKIE_NAME) {
Some(cookie) => cookie.value(),
None => return Outcome::Forward(()), // If there is no cookie, redirect to login
};
let ip = match request.guard::<ClientIp>() {
Outcome::Success(ip) => ip.ip,
_ => err_handler!("Error getting Client IP"),
};
let ip = match request.guard::<ClientIp>() {
Outcome::Success(ip) => ip.ip,
_ => err_handler!("Error getting Client IP"),
};
if decode_admin(access_token).is_err() {
// Remove admin cookie
cookies.remove(Cookie::named(COOKIE_NAME));
error!("Invalid or expired admin JWT. IP: {}.", ip);
return Outcome::Forward(());
if decode_admin(access_token).is_err() {
// Remove admin cookie
cookies.remove(Cookie::named(COOKIE_NAME));
error!("Invalid or expired admin JWT. IP: {}.", ip);
return Outcome::Forward(());
}
Outcome::Success(AdminToken {})
}
Outcome::Success(AdminToken {})
}
}
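The new backup endpoint is gated on CAN_BACKUP, which requires both the sqlite feature and a sqlite3 binary on the PATH (hence the sqlite3/sqlite packages added to the runtime images above). backup_database() itself is not part of this hunk; a SQLite online backup is conventionally done along these lines (paths illustrative):

sqlite3 /data/db.sqlite3 ".backup '/data/db_backup.sqlite3'"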

View File

@@ -62,7 +62,11 @@ fn register(data: JsonUpcase<RegisterData>, conn: DbConn) -> EmptyResult {
let mut user = match User::find_by_mail(&data.Email, &conn) {
Some(user) => {
if !user.password_hash.is_empty() {
err!("User already exists")
if CONFIG.signups_allowed() {
err!("User already exists")
} else {
err!("Registration not allowed or user already exists")
}
}
if let Some(token) = data.Token {
@@ -82,14 +86,14 @@ fn register(data: JsonUpcase<RegisterData>, conn: DbConn) -> EmptyResult {
} else if CONFIG.signups_allowed() {
err!("Account with this email already exists")
} else {
err!("Registration not allowed")
err!("Registration not allowed or user already exists")
}
}
None => {
if CONFIG.signups_allowed() || Invitation::take(&data.Email, &conn) {
if CONFIG.signups_allowed() || Invitation::take(&data.Email, &conn) || CONFIG.can_signup_user(&data.Email) {
User::new(data.Email.clone())
} else {
err!("Registration not allowed")
err!("Registration not allowed or user already exists")
}
}
};
@@ -106,7 +110,7 @@ fn register(data: JsonUpcase<RegisterData>, conn: DbConn) -> EmptyResult {
}
user.set_password(&data.MasterPasswordHash);
user.key = data.Key;
user.akey = data.Key;
// Add extra fields if present
if let Some(name) = data.Name {
@@ -204,7 +208,7 @@ fn post_password(data: JsonUpcase<ChangePassData>, headers: Headers, conn: DbCon
}
user.set_password(&data.NewMasterPasswordHash);
user.key = data.Key;
user.akey = data.Key;
user.save(&conn)
}
@@ -231,7 +235,7 @@ fn post_kdf(data: JsonUpcase<ChangeKdfData>, headers: Headers, conn: DbConn) ->
user.client_kdf_iter = data.KdfIterations;
user.client_kdf_type = data.Kdf;
user.set_password(&data.NewMasterPasswordHash);
user.key = data.Key;
user.akey = data.Key;
user.save(&conn)
}
@@ -306,7 +310,7 @@ fn post_rotatekey(data: JsonUpcase<KeyData>, headers: Headers, conn: DbConn, nt:
// Update user data
let mut user = headers.user;
user.key = data.Key;
user.akey = data.Key;
user.private_key = Some(data.PrivateKey);
user.reset_security_stamp();
@@ -322,6 +326,7 @@ fn post_sstamp(data: JsonUpcase<PasswordData>, headers: Headers, conn: DbConn) -
err!("Invalid password")
}
Device::delete_all_by_user(&user.uuid, &conn)?;
user.reset_security_stamp();
user.save(&conn)
}
@@ -376,7 +381,7 @@ fn post_email(data: JsonUpcase<ChangeEmailData>, headers: Headers, conn: DbConn)
user.email = data.NewEmail;
user.set_password(&data.NewMasterPasswordHash);
user.key = data.Key;
user.akey = data.Key;
user.save(&conn)
}

View File

@@ -74,10 +74,10 @@ fn sync(data: Form<SyncData>, headers: Headers, conn: DbConn) -> JsonResult {
let user_json = headers.user.to_json(&conn);
let folders = Folder::find_by_user(&headers.user.uuid, &conn);
let folders_json: Vec<Value> = folders.iter().map(|c| c.to_json()).collect();
let folders_json: Vec<Value> = folders.iter().map(Folder::to_json).collect();
let collections = Collection::find_by_user_uuid(&headers.user.uuid, &conn);
let collections_json: Vec<Value> = collections.iter().map(|c| c.to_json()).collect();
let collections_json: Vec<Value> = collections.iter().map(Collection::to_json).collect();
let ciphers = Cipher::find_by_user(&headers.user.uuid, &conn);
let ciphers_json: Vec<Value> = ciphers
@@ -88,7 +88,7 @@ fn sync(data: Form<SyncData>, headers: Headers, conn: DbConn) -> JsonResult {
let domains_json = if data.exclude_domains {
Value::Null
} else {
api::core::get_eq_domains(headers).unwrap().into_inner()
api::core::_get_eq_domains(headers, true).unwrap().into_inner()
};
Ok(Json(json!({
@@ -267,7 +267,7 @@ pub fn update_cipher_from_data(
err!("Attachment is not owned by the cipher")
}
saved_att.key = Some(attachment.Key);
saved_att.akey = Some(attachment.Key);
saved_att.file_name = attachment.FileName;
saved_att.save(&conn)?;
@@ -608,7 +608,7 @@ fn share_cipher_by_uuid(
None => err!("Invalid collection ID provided"),
Some(collection) => {
if collection.is_writable_by_user(&headers.user.uuid, &conn) {
CollectionCipher::save(&cipher.uuid.clone(), &collection.uuid, &conn)?;
CollectionCipher::save(&cipher.uuid, &collection.uuid, &conn)?;
shared_to_collection = true;
} else {
err!("No rights to modify the collection")
@@ -691,7 +691,7 @@ fn post_attachment(
};
let mut attachment = Attachment::new(file_name, cipher.uuid.clone(), name, size);
attachment.key = attachment_key.clone();
attachment.akey = attachment_key.clone();
attachment.save(&conn).expect("Error saving attachment");
}
_ => error!("Invalid multipart name"),
@@ -854,11 +854,7 @@ fn move_cipher_selected(data: JsonUpcase<MoveCipherData>, headers: Headers, conn
// Move cipher
cipher.move_to_folder(data.FolderId.clone(), &user_uuid, &conn)?;
nt.send_cipher_update(
UpdateType::CipherUpdate,
&cipher,
&[user_uuid.clone()]
);
nt.send_cipher_update(UpdateType::CipherUpdate, &cipher, &[user_uuid.clone()]);
}
Ok(())
@@ -874,8 +870,20 @@ fn move_cipher_selected_put(
move_cipher_selected(data, headers, conn, nt)
}
#[post("/ciphers/purge", data = "<data>")]
fn delete_all(data: JsonUpcase<PasswordData>, headers: Headers, conn: DbConn, nt: Notify) -> EmptyResult {
#[derive(FromForm)]
struct OrganizationId {
#[form(field = "organizationId")]
org_id: String,
}
#[post("/ciphers/purge?<organization..>", data = "<data>")]
fn delete_all(
organization: Option<Form<OrganizationId>>,
data: JsonUpcase<PasswordData>,
headers: Headers,
conn: DbConn,
nt: Notify,
) -> EmptyResult {
let data: PasswordData = data.into_inner().data;
let password_hash = data.MasterPasswordHash;
@@ -885,19 +893,40 @@ fn delete_all(data: JsonUpcase<PasswordData>, headers: Headers, conn: DbConn, nt
err!("Invalid password")
}
// Delete ciphers and their attachments
for cipher in Cipher::find_owned_by_user(&user.uuid, &conn) {
cipher.delete(&conn)?;
}
match organization {
Some(org_data) => {
// Organization ID in query params, purging organization vault
match UserOrganization::find_by_user_and_org(&user.uuid, &org_data.org_id, &conn) {
None => err!("You don't have permission to purge the organization vault"),
Some(user_org) => {
if user_org.atype == UserOrgType::Owner {
Cipher::delete_all_by_organization(&org_data.org_id, &conn)?;
Collection::delete_all_by_organization(&org_data.org_id, &conn)?;
nt.send_user_update(UpdateType::Vault, &user);
Ok(())
} else {
err!("You don't have permission to purge the organization vault");
}
}
}
}
None => {
// No organization ID in query params, purging user vault
// Delete ciphers and their attachments
for cipher in Cipher::find_owned_by_user(&user.uuid, &conn) {
cipher.delete(&conn)?;
}
// Delete folders
for f in Folder::find_by_user(&user.uuid, &conn) {
f.delete(&conn)?;
}
// Delete folders
for f in Folder::find_by_user(&user.uuid, &conn) {
f.delete(&conn)?;
}
user.update_revision(&conn)?;
nt.send_user_update(UpdateType::Vault, &user);
Ok(())
user.update_revision(&conn)?;
nt.send_user_update(UpdateType::Vault, &user);
Ok(())
}
}
}
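With this change the purge endpoint takes an optional organizationId query parameter: when absent it empties the caller's own vault as before, and when present it wipes the organization's ciphers and collections, but only if the caller is an owner. A call of the second form would look roughly like this (host, IDs and token are placeholders, and the /api mount prefix is assumed):

curl -X POST "http://localhost/api/ciphers/purge?organizationId=$ORG_UUID" \
  -H "Authorization: Bearer $ACCESS_TOKEN" \
  -H "Content-Type: application/json" \
  -d '{"MasterPasswordHash":"<client-side hash>"}'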
fn _delete_cipher_by_uuid(uuid: &str, headers: &Headers, conn: &DbConn, nt: &Notify) -> EmptyResult {

View File

@@ -25,7 +25,7 @@ pub fn routes() -> Vec<Route> {
fn get_folders(headers: Headers, conn: DbConn) -> JsonResult {
let folders = Folder::find_by_user(&headers.user.uuid, &conn);
let folders_json: Vec<Value> = folders.iter().map(|c| c.to_json()).collect();
let folders_json: Vec<Value> = folders.iter().map(Folder::to_json).collect();
Ok(Json(json!({
"Data": folders_json,
@@ -59,7 +59,7 @@ pub struct FolderData {
fn post_folders(data: JsonUpcase<FolderData>, headers: Headers, conn: DbConn, nt: Notify) -> JsonResult {
let data: FolderData = data.into_inner().data;
let mut folder = Folder::new(headers.user.uuid.clone(), data.Name);
let mut folder = Folder::new(headers.user.uuid, data.Name);
folder.save(&conn)?;
nt.send_folder_update(UpdateType::FolderCreate, &folder);

View File

@@ -33,10 +33,10 @@ use rocket::Route;
use rocket_contrib::json::Json;
use serde_json::Value;
use crate::db::DbConn;
use crate::api::{EmptyResult, JsonResult, JsonUpcase};
use crate::auth::Headers;
use crate::db::DbConn;
use crate::error::Error;
#[put("/devices/identifier/<uuid>/clear-token")]
fn clear_device_token(uuid: String) -> EmptyResult {
@@ -63,7 +63,7 @@ fn put_device_token(uuid: String, data: JsonUpcase<Value>, headers: Headers) ->
Ok(Json(json!({
"Id": headers.device.uuid,
"Name": headers.device.name,
"Type": headers.device.type_,
"Type": headers.device.atype,
"Identifier": headers.device.uuid,
"CreationDate": crate::util::format_date(&headers.device.created_at),
})))
@@ -81,6 +81,10 @@ const GLOBAL_DOMAINS: &str = include_str!("../../static/global_domains.json");
#[get("/settings/domains")]
fn get_eq_domains(headers: Headers) -> JsonResult {
_get_eq_domains(headers, false)
}
fn _get_eq_domains(headers: Headers, no_excluded: bool) -> JsonResult {
let user = headers.user;
use serde_json::from_str;
@@ -93,6 +97,10 @@ fn get_eq_domains(headers: Headers) -> JsonResult {
global.Excluded = excluded_globals.contains(&global.Type);
}
if no_excluded {
globals.retain(|g| !g.Excluded);
}
Ok(Json(json!({
"EquivalentDomains": equivalent_domains,
"GlobalEquivalentDomains": globals,
@@ -132,17 +140,44 @@ fn put_eq_domains(data: JsonUpcase<EquivDomainData>, headers: Headers, conn: DbC
#[get("/hibp/breach?<username>")]
fn hibp_breach(username: String) -> JsonResult {
let url = format!("https://haveibeenpwned.com/api/v2/breachedaccount/{}", username);
let user_agent = "Bitwarden_RS";
let url = format!(
"https://haveibeenpwned.com/api/v3/breachedaccount/{}?truncateResponse=false&includeUnverified=false",
username
);
use reqwest::{header::USER_AGENT, Client};
let value: Value = Client::new()
.get(&url)
.header(USER_AGENT, user_agent)
.send()?
.error_for_status()?
.json()?;
if let Some(api_key) = crate::CONFIG.hibp_api_key() {
let hibp_client = Client::builder()
.use_sys_proxy()
.build()?;
Ok(Json(value))
let res = hibp_client.get(&url)
.header(USER_AGENT, user_agent)
.header("hibp-api-key", api_key)
.send()?;
// If we get a 404, return a 404; it means there are no breached accounts
if res.status() == 404 {
return Err(Error::empty().with_code(404));
}
let value: Value = res.error_for_status()?.json()?;
Ok(Json(value))
} else {
Ok(Json(json!([{
"Name": "HaveIBeenPwned",
"Title": "Manual HIBP Check",
"Domain": "haveibeenpwned.com",
"BreachDate": "2019-08-18T00:00:00Z",
"AddedDate": "2019-08-18T00:00:00Z",
"Description": format!("Go to: <a href=\"https://haveibeenpwned.com/account/{account}\" target=\"_blank\" rel=\"noopener\">https://haveibeenpwned.com/account/{account}</a> for a manual check.<br/><br/>HaveIBeenPwned API key not set!<br/>Go to <a href=\"https://haveibeenpwned.com/API/Key\" target=\"_blank\" rel=\"noopener\">https://haveibeenpwned.com/API/Key</a> to purchase an API key from HaveIBeenPwned.<br/><br/>", account=username),
"LogoPath": "/bwrs_static/hibp.png",
"PwnCount": 0,
"DataClasses": [
"Error - No API key set!"
]
}])))
}
}

View File

@@ -76,13 +76,13 @@ struct NewCollectionData {
fn create_organization(headers: Headers, data: JsonUpcase<OrgData>, conn: DbConn) -> JsonResult {
let data: OrgData = data.into_inner().data;
let mut org = Organization::new(data.Name, data.BillingEmail);
let mut user_org = UserOrganization::new(headers.user.uuid.clone(), org.uuid.clone());
let mut collection = Collection::new(org.uuid.clone(), data.CollectionName);
let org = Organization::new(data.Name, data.BillingEmail);
let mut user_org = UserOrganization::new(headers.user.uuid, org.uuid.clone());
let collection = Collection::new(org.uuid.clone(), data.CollectionName);
user_org.key = data.Key;
user_org.akey = data.Key;
user_org.access_all = true;
user_org.type_ = UserOrgType::Owner as i32;
user_org.atype = UserOrgType::Owner as i32;
user_org.status = UserOrgStatus::Confirmed as i32;
org.save(&conn)?;
@@ -127,7 +127,7 @@ fn leave_organization(org_id: String, headers: Headers, conn: DbConn) -> EmptyRe
match UserOrganization::find_by_user_and_org(&headers.user.uuid, &org_id, &conn) {
None => err!("User not part of organization"),
Some(user_org) => {
if user_org.type_ == UserOrgType::Owner {
if user_org.atype == UserOrgType::Owner {
let num_owners =
UserOrganization::find_by_org_and_type(&org_id, UserOrgType::Owner as i32, &conn).len();
@@ -221,7 +221,7 @@ fn post_organization_collections(
None => err!("Can't find organization details"),
};
let mut collection = Collection::new(org.uuid.clone(), data.Name);
let collection = Collection::new(org.uuid, data.Name);
collection.save(&conn)?;
Ok(Json(collection.to_json()))
@@ -262,7 +262,7 @@ fn post_organization_collection_update(
err!("Collection is not owned by organization");
}
collection.name = data.Name.clone();
collection.name = data.Name;
collection.save(&conn)?;
Ok(Json(collection.to_json()))
@@ -484,7 +484,7 @@ fn send_invite(org_id: String, data: JsonUpcase<InviteData>, headers: AdminHeade
}
if !CONFIG.mail_enabled() {
let mut invitation = Invitation::new(email.clone());
let invitation = Invitation::new(email.clone());
invitation.save(&conn)?;
}
@@ -505,7 +505,7 @@ fn send_invite(org_id: String, data: JsonUpcase<InviteData>, headers: AdminHeade
let mut new_user = UserOrganization::new(user.uuid.clone(), org_id.clone());
let access_all = data.AccessAll.unwrap_or(false);
new_user.access_all = access_all;
new_user.type_ = new_type;
new_user.atype = new_type;
new_user.status = user_org_status;
// If no accessAll, add the collections received
@@ -581,7 +581,7 @@ fn reinvite_user(org_id: String, user_org: String, headers: AdminHeaders, conn:
Some(headers.user.email),
)?;
} else {
let mut invitation = Invitation::new(user.email.clone());
let invitation = Invitation::new(user.email);
invitation.save(&conn)?;
}
@@ -657,7 +657,7 @@ fn confirm_invite(
None => err!("The specified user isn't a member of the organization"),
};
if user_to_confirm.type_ != UserOrgType::User && headers.org_user_type != UserOrgType::Owner {
if user_to_confirm.atype != UserOrgType::User && headers.org_user_type != UserOrgType::Owner {
err!("Only Owners can confirm Managers, Admins or Owners")
}
@@ -666,7 +666,7 @@ fn confirm_invite(
}
user_to_confirm.status = UserOrgStatus::Confirmed as i32;
user_to_confirm.key = match data["Key"].as_str() {
user_to_confirm.akey = match data["Key"].as_str() {
Some(key) => key.to_string(),
None => err!("Invalid key provided"),
};
@@ -735,18 +735,18 @@ fn edit_user(
None => err!("The specified user isn't member of the organization"),
};
if new_type != user_to_edit.type_
&& (user_to_edit.type_ >= UserOrgType::Admin || new_type >= UserOrgType::Admin)
if new_type != user_to_edit.atype
&& (user_to_edit.atype >= UserOrgType::Admin || new_type >= UserOrgType::Admin)
&& headers.org_user_type != UserOrgType::Owner
{
err!("Only Owners can grant and remove Admin or Owner privileges")
}
if user_to_edit.type_ == UserOrgType::Owner && headers.org_user_type != UserOrgType::Owner {
if user_to_edit.atype == UserOrgType::Owner && headers.org_user_type != UserOrgType::Owner {
err!("Only Owners can edit Owner users")
}
if user_to_edit.type_ == UserOrgType::Owner && new_type != UserOrgType::Owner {
if user_to_edit.atype == UserOrgType::Owner && new_type != UserOrgType::Owner {
// Removing owner permission, check that there is at least one other owner
let num_owners = UserOrganization::find_by_org_and_type(&org_id, UserOrgType::Owner as i32, &conn).len();
@@ -756,7 +756,7 @@ fn edit_user(
}
user_to_edit.access_all = data.AccessAll;
user_to_edit.type_ = new_type as i32;
user_to_edit.atype = new_type as i32;
// Delete all the odd collections
for c in CollectionUser::find_by_organization_and_user_uuid(&org_id, &user_to_edit.user_uuid, &conn) {
@@ -785,11 +785,11 @@ fn delete_user(org_id: String, org_user_id: String, headers: AdminHeaders, conn:
None => err!("User to delete isn't member of the organization"),
};
if user_to_delete.type_ != UserOrgType::User && headers.org_user_type != UserOrgType::Owner {
if user_to_delete.atype != UserOrgType::User && headers.org_user_type != UserOrgType::Owner {
err!("Only Owners can delete Admins or Owners")
}
if user_to_delete.type_ == UserOrgType::Owner {
if user_to_delete.atype == UserOrgType::Owner {
// Removing owner, check that there is at least one other owner
let num_owners = UserOrganization::find_by_org_and_type(&org_id, UserOrgType::Owner as i32, &conn).len();
@@ -842,7 +842,7 @@ fn post_org_import(
None => err!("User is not part of the organization"),
};
if org_user.type_ < UserOrgType::Admin {
if org_user.atype < UserOrgType::Admin {
err!("Only admins or owners can import into an organization")
}
@@ -851,7 +851,7 @@ fn post_org_import(
.Collections
.into_iter()
.map(|coll| {
let mut collection = Collection::new(org_id.clone(), coll.Name);
let collection = Collection::new(org_id.clone(), coll.Name);
if collection.save(&conn).is_err() {
err!("Failed to create Collection");
}

View File

@@ -1,657 +0,0 @@
use data_encoding::BASE32;
use rocket_contrib::json::Json;
use serde_json;
use serde_json::Value;
use crate::api::{ApiResult, EmptyResult, JsonResult, JsonUpcase, NumberOrString, PasswordData};
use crate::auth::Headers;
use crate::crypto;
use crate::db::{
models::{TwoFactor, TwoFactorType, User},
DbConn,
};
use crate::error::{Error, MapResult};
use rocket::Route;
pub fn routes() -> Vec<Route> {
routes![
get_twofactor,
get_recover,
recover,
disable_twofactor,
disable_twofactor_put,
generate_authenticator,
activate_authenticator,
activate_authenticator_put,
generate_u2f,
generate_u2f_challenge,
activate_u2f,
activate_u2f_put,
generate_yubikey,
activate_yubikey,
activate_yubikey_put,
]
}
#[get("/two-factor")]
fn get_twofactor(headers: Headers, conn: DbConn) -> JsonResult {
let twofactors = TwoFactor::find_by_user(&headers.user.uuid, &conn);
let twofactors_json: Vec<Value> = twofactors.iter().map(|c| c.to_json_list()).collect();
Ok(Json(json!({
"Data": twofactors_json,
"Object": "list",
"ContinuationToken": null,
})))
}
#[post("/two-factor/get-recover", data = "<data>")]
fn get_recover(data: JsonUpcase<PasswordData>, headers: Headers) -> JsonResult {
let data: PasswordData = data.into_inner().data;
let user = headers.user;
if !user.check_valid_password(&data.MasterPasswordHash) {
err!("Invalid password");
}
Ok(Json(json!({
"Code": user.totp_recover,
"Object": "twoFactorRecover"
})))
}
#[derive(Deserialize)]
#[allow(non_snake_case)]
struct RecoverTwoFactor {
MasterPasswordHash: String,
Email: String,
RecoveryCode: String,
}
#[post("/two-factor/recover", data = "<data>")]
fn recover(data: JsonUpcase<RecoverTwoFactor>, conn: DbConn) -> JsonResult {
let data: RecoverTwoFactor = data.into_inner().data;
use crate::db::models::User;
// Get the user
let mut user = match User::find_by_mail(&data.Email, &conn) {
Some(user) => user,
None => err!("Username or password is incorrect. Try again."),
};
// Check password
if !user.check_valid_password(&data.MasterPasswordHash) {
err!("Username or password is incorrect. Try again.")
}
// Check if recovery code is correct
if !user.check_valid_recovery_code(&data.RecoveryCode) {
err!("Recovery code is incorrect. Try again.")
}
// Remove all twofactors from the user
for twofactor in TwoFactor::find_by_user(&user.uuid, &conn) {
twofactor.delete(&conn)?;
}
// Remove the recovery code, not needed without twofactors
user.totp_recover = None;
user.save(&conn)?;
Ok(Json(json!({})))
}
#[derive(Deserialize)]
#[allow(non_snake_case)]
struct DisableTwoFactorData {
MasterPasswordHash: String,
Type: NumberOrString,
}
#[post("/two-factor/disable", data = "<data>")]
fn disable_twofactor(data: JsonUpcase<DisableTwoFactorData>, headers: Headers, conn: DbConn) -> JsonResult {
let data: DisableTwoFactorData = data.into_inner().data;
let password_hash = data.MasterPasswordHash;
let user = headers.user;
if !user.check_valid_password(&password_hash) {
err!("Invalid password");
}
let type_ = data.Type.into_i32().expect("Invalid type");
if let Some(twofactor) = TwoFactor::find_by_user_and_type(&user.uuid, type_, &conn) {
twofactor.delete(&conn)?;
}
Ok(Json(json!({
"Enabled": false,
"Type": type_,
"Object": "twoFactorProvider"
})))
}
#[put("/two-factor/disable", data = "<data>")]
fn disable_twofactor_put(data: JsonUpcase<DisableTwoFactorData>, headers: Headers, conn: DbConn) -> JsonResult {
disable_twofactor(data, headers, conn)
}
#[post("/two-factor/get-authenticator", data = "<data>")]
fn generate_authenticator(data: JsonUpcase<PasswordData>, headers: Headers, conn: DbConn) -> JsonResult {
let data: PasswordData = data.into_inner().data;
let user = headers.user;
if !user.check_valid_password(&data.MasterPasswordHash) {
err!("Invalid password");
}
let type_ = TwoFactorType::Authenticator as i32;
let twofactor = TwoFactor::find_by_user_and_type(&user.uuid, type_, &conn);
let (enabled, key) = match twofactor {
Some(tf) => (true, tf.data),
_ => (false, BASE32.encode(&crypto::get_random(vec![0u8; 20]))),
};
Ok(Json(json!({
"Enabled": enabled,
"Key": key,
"Object": "twoFactorAuthenticator"
})))
}
#[derive(Deserialize, Debug)]
#[allow(non_snake_case)]
struct EnableAuthenticatorData {
MasterPasswordHash: String,
Key: String,
Token: NumberOrString,
}
#[post("/two-factor/authenticator", data = "<data>")]
fn activate_authenticator(data: JsonUpcase<EnableAuthenticatorData>, headers: Headers, conn: DbConn) -> JsonResult {
let data: EnableAuthenticatorData = data.into_inner().data;
let password_hash = data.MasterPasswordHash;
let key = data.Key;
let token = match data.Token.into_i32() {
Some(n) => n as u64,
None => err!("Malformed token"),
};
let mut user = headers.user;
if !user.check_valid_password(&password_hash) {
err!("Invalid password");
}
// Validate that the key is base32-encoded and 20 bytes long
let decoded_key: Vec<u8> = match BASE32.decode(key.as_bytes()) {
Ok(decoded) => decoded,
_ => err!("Invalid totp secret"),
};
if decoded_key.len() != 20 {
err!("Invalid key length")
}
let type_ = TwoFactorType::Authenticator;
let twofactor = TwoFactor::new(user.uuid.clone(), type_, key.to_uppercase());
// Validate the token provided with the key
if !twofactor.check_totp_code(token) {
err!("Invalid totp code")
}
_generate_recover_code(&mut user, &conn);
twofactor.save(&conn)?;
Ok(Json(json!({
"Enabled": true,
"Key": key,
"Object": "twoFactorAuthenticator"
})))
}
#[put("/two-factor/authenticator", data = "<data>")]
fn activate_authenticator_put(data: JsonUpcase<EnableAuthenticatorData>, headers: Headers, conn: DbConn) -> JsonResult {
activate_authenticator(data, headers, conn)
}
fn _generate_recover_code(user: &mut User, conn: &DbConn) {
if user.totp_recover.is_none() {
let totp_recover = BASE32.encode(&crypto::get_random(vec![0u8; 20]));
user.totp_recover = Some(totp_recover);
user.save(conn).ok();
}
}
use u2f::messages::{RegisterResponse, SignResponse, U2fSignRequest};
use u2f::protocol::{Challenge, U2f};
use u2f::register::Registration;
use crate::CONFIG;
const U2F_VERSION: &str = "U2F_V2";
lazy_static! {
static ref APP_ID: String = format!("{}/app-id.json", &CONFIG.domain());
static ref U2F: U2f = U2f::new(APP_ID.clone());
}
#[post("/two-factor/get-u2f", data = "<data>")]
fn generate_u2f(data: JsonUpcase<PasswordData>, headers: Headers, conn: DbConn) -> JsonResult {
if !CONFIG.domain_set() {
err!("`DOMAIN` environment variable is not set. U2F disabled")
}
let data: PasswordData = data.into_inner().data;
let user = headers.user;
if !user.check_valid_password(&data.MasterPasswordHash) {
err!("Invalid password");
}
let u2f_type = TwoFactorType::U2f as i32;
let enabled = TwoFactor::find_by_user_and_type(&user.uuid, u2f_type, &conn).is_some();
Ok(Json(json!({
"Enabled": enabled,
"Object": "twoFactorU2f"
})))
}
#[post("/two-factor/get-u2f-challenge", data = "<data>")]
fn generate_u2f_challenge(data: JsonUpcase<PasswordData>, headers: Headers, conn: DbConn) -> JsonResult {
let data: PasswordData = data.into_inner().data;
let user = headers.user;
if !user.check_valid_password(&data.MasterPasswordHash) {
err!("Invalid password");
}
let user_uuid = &user.uuid;
let challenge = _create_u2f_challenge(user_uuid, TwoFactorType::U2fRegisterChallenge, &conn).challenge;
Ok(Json(json!({
"UserId": user.uuid,
"AppId": APP_ID.to_string(),
"Challenge": challenge,
"Version": U2F_VERSION,
})))
}
#[derive(Deserialize, Debug)]
#[allow(non_snake_case)]
struct EnableU2FData {
Id: NumberOrString, // 1..5
Name: String,
MasterPasswordHash: String,
DeviceResponse: String,
}
// This struct is copied from the U2F lib
// to add an optional error code
#[derive(Deserialize)]
#[serde(rename_all = "camelCase")]
struct RegisterResponseCopy {
pub registration_data: String,
pub version: String,
pub client_data: String,
pub error_code: Option<NumberOrString>,
}
impl RegisterResponseCopy {
fn into_response(self) -> RegisterResponse {
RegisterResponse {
registration_data: self.registration_data,
version: self.version,
client_data: self.client_data,
}
}
}
#[post("/two-factor/u2f", data = "<data>")]
fn activate_u2f(data: JsonUpcase<EnableU2FData>, headers: Headers, conn: DbConn) -> JsonResult {
let data: EnableU2FData = data.into_inner().data;
let mut user = headers.user;
if !user.check_valid_password(&data.MasterPasswordHash) {
err!("Invalid password");
}
let tf_type = TwoFactorType::U2fRegisterChallenge as i32;
let tf_challenge = match TwoFactor::find_by_user_and_type(&user.uuid, tf_type, &conn) {
Some(c) => c,
None => err!("Can't recover challenge"),
};
let challenge: Challenge = serde_json::from_str(&tf_challenge.data)?;
tf_challenge.delete(&conn)?;
let response_copy: RegisterResponseCopy = serde_json::from_str(&data.DeviceResponse)?;
let error_code = response_copy
.error_code
.clone()
.map_or("0".into(), NumberOrString::into_string);
if error_code != "0" {
err!("Error registering U2F token")
}
let response = response_copy.into_response();
let registration = U2F.register_response(challenge.clone(), response)?;
// TODO: Allow more than one U2F device
let mut registrations = Vec::new();
registrations.push(registration);
let tf_registration = TwoFactor::new(
user.uuid.clone(),
TwoFactorType::U2f,
serde_json::to_string(&registrations).unwrap(),
);
tf_registration.save(&conn)?;
_generate_recover_code(&mut user, &conn);
Ok(Json(json!({
"Enabled": true,
"Challenge": {
"UserId": user.uuid,
"AppId": APP_ID.to_string(),
"Challenge": challenge,
"Version": U2F_VERSION,
},
"Object": "twoFactorU2f"
})))
}
#[put("/two-factor/u2f", data = "<data>")]
fn activate_u2f_put(data: JsonUpcase<EnableU2FData>, headers: Headers, conn: DbConn) -> JsonResult {
activate_u2f(data, headers, conn)
}
fn _create_u2f_challenge(user_uuid: &str, type_: TwoFactorType, conn: &DbConn) -> Challenge {
let challenge = U2F.generate_challenge().unwrap();
TwoFactor::new(user_uuid.into(), type_, serde_json::to_string(&challenge).unwrap())
.save(conn)
.expect("Error saving challenge");
challenge
}
// This struct is copied from the U2F lib
// because it doesn't implement Deserialize
#[derive(Serialize, Deserialize, Clone)]
#[serde(rename_all = "camelCase")]
struct RegistrationCopy {
pub key_handle: Vec<u8>,
pub pub_key: Vec<u8>,
pub attestation_cert: Option<Vec<u8>>,
}
impl Into<Registration> for RegistrationCopy {
fn into(self) -> Registration {
Registration {
key_handle: self.key_handle,
pub_key: self.pub_key,
attestation_cert: self.attestation_cert,
}
}
}
fn _parse_registrations(registations: &str) -> Vec<Registration> {
let registrations_copy: Vec<RegistrationCopy> =
serde_json::from_str(registations).expect("Can't parse RegistrationCopy data");
registrations_copy.into_iter().map(Into::into).collect()
}
pub fn generate_u2f_login(user_uuid: &str, conn: &DbConn) -> ApiResult<U2fSignRequest> {
let challenge = _create_u2f_challenge(user_uuid, TwoFactorType::U2fLoginChallenge, conn);
let type_ = TwoFactorType::U2f as i32;
let twofactor = match TwoFactor::find_by_user_and_type(user_uuid, type_, conn) {
Some(tf) => tf,
None => err!("No U2F devices registered"),
};
let registrations = _parse_registrations(&twofactor.data);
let signed_request: U2fSignRequest = U2F.sign_request(challenge, registrations);
Ok(signed_request)
}
pub fn validate_u2f_login(user_uuid: &str, response: &str, conn: &DbConn) -> EmptyResult {
let challenge_type = TwoFactorType::U2fLoginChallenge as i32;
let u2f_type = TwoFactorType::U2f as i32;
let tf_challenge = TwoFactor::find_by_user_and_type(user_uuid, challenge_type, &conn);
let challenge = match tf_challenge {
Some(tf_challenge) => {
let challenge: Challenge = serde_json::from_str(&tf_challenge.data)?;
tf_challenge.delete(&conn)?;
challenge
}
None => err!("Can't recover login challenge"),
};
let twofactor = match TwoFactor::find_by_user_and_type(user_uuid, u2f_type, conn) {
Some(tf) => tf,
None => err!("No U2F devices registered"),
};
let registrations = _parse_registrations(&twofactor.data);
let response: SignResponse = serde_json::from_str(response)?;
let mut _counter: u32 = 0;
for registration in registrations {
let response = U2F.sign_response(challenge.clone(), registration, response.clone(), _counter);
match response {
Ok(new_counter) => {
_counter = new_counter;
info!("O {:#}", new_counter);
return Ok(());
}
Err(e) => {
info!("E {:#}", e);
// break;
}
}
}
err!("error verifying response")
}
#[derive(Deserialize, Debug)]
#[allow(non_snake_case)]
struct EnableYubikeyData {
MasterPasswordHash: String,
Key1: Option<String>,
Key2: Option<String>,
Key3: Option<String>,
Key4: Option<String>,
Key5: Option<String>,
Nfc: bool,
}
#[derive(Deserialize, Serialize, Debug)]
#[allow(non_snake_case)]
pub struct YubikeyMetadata {
Keys: Vec<String>,
pub Nfc: bool,
}
use yubico::config::Config;
use yubico::Yubico;
fn parse_yubikeys(data: &EnableYubikeyData) -> Vec<String> {
let data_keys = [&data.Key1, &data.Key2, &data.Key3, &data.Key4, &data.Key5];
data_keys.iter().filter_map(|e| e.as_ref().cloned()).collect()
}
fn jsonify_yubikeys(yubikeys: Vec<String>) -> serde_json::Value {
let mut result = json!({});
for (i, key) in yubikeys.into_iter().enumerate() {
result[format!("Key{}", i + 1)] = Value::String(key);
}
result
}
fn get_yubico_credentials() -> Result<(String, String), Error> {
match (CONFIG.yubico_client_id(), CONFIG.yubico_secret_key()) {
(Some(id), Some(secret)) => Ok((id, secret)),
_ => err!("`YUBICO_CLIENT_ID` or `YUBICO_SECRET_KEY` environment variable is not set. Yubikey OTP Disabled"),
}
}
fn verify_yubikey_otp(otp: String) -> EmptyResult {
let (yubico_id, yubico_secret) = get_yubico_credentials()?;
let yubico = Yubico::new();
let config = Config::default().set_client_id(yubico_id).set_key(yubico_secret);
match CONFIG.yubico_server() {
Some(server) => yubico.verify(otp, config.set_api_hosts(vec![server])),
None => yubico.verify(otp, config),
}
.map_res("Failed to verify OTP")
.and(Ok(()))
}
#[post("/two-factor/get-yubikey", data = "<data>")]
fn generate_yubikey(data: JsonUpcase<PasswordData>, headers: Headers, conn: DbConn) -> JsonResult {
// Make sure the credentials are set
get_yubico_credentials()?;
let data: PasswordData = data.into_inner().data;
let user = headers.user;
if !user.check_valid_password(&data.MasterPasswordHash) {
err!("Invalid password");
}
let user_uuid = &user.uuid;
let yubikey_type = TwoFactorType::YubiKey as i32;
let r = TwoFactor::find_by_user_and_type(user_uuid, yubikey_type, &conn);
if let Some(r) = r {
let yubikey_metadata: YubikeyMetadata = serde_json::from_str(&r.data)?;
let mut result = jsonify_yubikeys(yubikey_metadata.Keys);
result["Enabled"] = Value::Bool(true);
result["Nfc"] = Value::Bool(yubikey_metadata.Nfc);
result["Object"] = Value::String("twoFactorU2f".to_owned());
Ok(Json(result))
} else {
Ok(Json(json!({
"Enabled": false,
"Object": "twoFactorU2f",
})))
}
}
#[post("/two-factor/yubikey", data = "<data>")]
fn activate_yubikey(data: JsonUpcase<EnableYubikeyData>, headers: Headers, conn: DbConn) -> JsonResult {
let data: EnableYubikeyData = data.into_inner().data;
let mut user = headers.user;
if !user.check_valid_password(&data.MasterPasswordHash) {
err!("Invalid password");
}
// Check if we already have some data
let yubikey_data = TwoFactor::find_by_user_and_type(&user.uuid, TwoFactorType::YubiKey as i32, &conn);
if let Some(yubikey_data) = yubikey_data {
yubikey_data.delete(&conn)?;
}
let yubikeys = parse_yubikeys(&data);
if yubikeys.is_empty() {
return Ok(Json(json!({
"Enabled": false,
"Object": "twoFactorU2f",
})));
}
// Ensure they are valid OTPs
for yubikey in &yubikeys {
if yubikey.len() == 12 {
// YubiKey ID
continue;
}
verify_yubikey_otp(yubikey.to_owned()).map_res("Invalid Yubikey OTP provided")?;
}
let yubikey_ids: Vec<String> = yubikeys.into_iter().map(|x| (&x[..12]).to_owned()).collect();
let yubikey_metadata = YubikeyMetadata {
Keys: yubikey_ids,
Nfc: data.Nfc,
};
let yubikey_registration = TwoFactor::new(
user.uuid.clone(),
TwoFactorType::YubiKey,
serde_json::to_string(&yubikey_metadata).unwrap(),
);
yubikey_registration.save(&conn)?;
_generate_recover_code(&mut user, &conn);
let mut result = jsonify_yubikeys(yubikey_metadata.Keys);
result["Enabled"] = Value::Bool(true);
result["Nfc"] = Value::Bool(yubikey_metadata.Nfc);
result["Object"] = Value::String("twoFactorU2f".to_owned());
Ok(Json(result))
}
#[put("/two-factor/yubikey", data = "<data>")]
fn activate_yubikey_put(data: JsonUpcase<EnableYubikeyData>, headers: Headers, conn: DbConn) -> JsonResult {
activate_yubikey(data, headers, conn)
}
pub fn validate_yubikey_login(user_uuid: &str, response: &str, conn: &DbConn) -> EmptyResult {
if response.len() != 44 {
err!("Invalid Yubikey OTP length");
}
let yubikey_type = TwoFactorType::YubiKey as i32;
let twofactor = match TwoFactor::find_by_user_and_type(user_uuid, yubikey_type, &conn) {
Some(tf) => tf,
None => err!("No YubiKey devices registered"),
};
let yubikey_metadata: YubikeyMetadata =
serde_json::from_str(&twofactor.data).expect("Can't parse Yubikey Metadata");
let response_id = &response[..12];
if !yubikey_metadata.Keys.contains(&response_id.to_owned()) {
err!("Given Yubikey is not registered");
}
let result = verify_yubikey_otp(response.to_owned());
match result {
Ok(_answer) => Ok(()),
Err(_e) => err!("Failed to verify Yubikey against OTP server"),
}
}

View File

@@ -0,0 +1,155 @@
use data_encoding::BASE32;
use rocket::Route;
use rocket_contrib::json::Json;
use crate::api::core::two_factor::_generate_recover_code;
use crate::api::{EmptyResult, JsonResult, JsonUpcase, NumberOrString, PasswordData};
use crate::auth::Headers;
use crate::crypto;
use crate::db::{
models::{TwoFactor, TwoFactorType},
DbConn,
};
pub use crate::config::CONFIG;
pub fn routes() -> Vec<Route> {
routes![
generate_authenticator,
activate_authenticator,
activate_authenticator_put,
]
}
#[post("/two-factor/get-authenticator", data = "<data>")]
fn generate_authenticator(data: JsonUpcase<PasswordData>, headers: Headers, conn: DbConn) -> JsonResult {
let data: PasswordData = data.into_inner().data;
let user = headers.user;
if !user.check_valid_password(&data.MasterPasswordHash) {
err!("Invalid password");
}
let type_ = TwoFactorType::Authenticator as i32;
let twofactor = TwoFactor::find_by_user_and_type(&user.uuid, type_, &conn);
let (enabled, key) = match twofactor {
Some(tf) => (true, tf.data),
_ => (false, BASE32.encode(&crypto::get_random(vec![0u8; 20]))),
};
Ok(Json(json!({
"Enabled": enabled,
"Key": key,
"Object": "twoFactorAuthenticator"
})))
}
#[derive(Deserialize, Debug)]
#[allow(non_snake_case)]
struct EnableAuthenticatorData {
MasterPasswordHash: String,
Key: String,
Token: NumberOrString,
}
#[post("/two-factor/authenticator", data = "<data>")]
fn activate_authenticator(data: JsonUpcase<EnableAuthenticatorData>, headers: Headers, conn: DbConn) -> JsonResult {
let data: EnableAuthenticatorData = data.into_inner().data;
let password_hash = data.MasterPasswordHash;
let key = data.Key;
let token = data.Token.into_i32()? as u64;
let mut user = headers.user;
if !user.check_valid_password(&password_hash) {
err!("Invalid password");
}
// Validate that the key is base32-encoded and 20 bytes long
let decoded_key: Vec<u8> = match BASE32.decode(key.as_bytes()) {
Ok(decoded) => decoded,
_ => err!("Invalid totp secret"),
};
if decoded_key.len() != 20 {
err!("Invalid key length")
}
// Validate the token provided with the key, and save new twofactor
validate_totp_code(&user.uuid, token, &key.to_uppercase(), &conn)?;
_generate_recover_code(&mut user, &conn);
Ok(Json(json!({
"Enabled": true,
"Key": key,
"Object": "twoFactorAuthenticator"
})))
}
#[put("/two-factor/authenticator", data = "<data>")]
fn activate_authenticator_put(data: JsonUpcase<EnableAuthenticatorData>, headers: Headers, conn: DbConn) -> JsonResult {
activate_authenticator(data, headers, conn)
}
pub fn validate_totp_code_str(user_uuid: &str, totp_code: &str, secret: &str, conn: &DbConn) -> EmptyResult {
let totp_code: u64 = match totp_code.parse() {
Ok(code) => code,
_ => err!("TOTP code is not a number"),
};
validate_totp_code(user_uuid, totp_code, secret, &conn)
}
pub fn validate_totp_code(user_uuid: &str, totp_code: u64, secret: &str, conn: &DbConn) -> EmptyResult {
use oath::{totp_raw_custom_time, HashType};
use std::time::{UNIX_EPOCH, SystemTime};
let decoded_secret = match BASE32.decode(secret.as_bytes()) {
Ok(s) => s,
Err(_) => err!("Invalid TOTP secret"),
};
let mut twofactor = match TwoFactor::find_by_user_and_type(&user_uuid, TwoFactorType::Authenticator as i32, &conn) {
Some(tf) => tf,
_ => TwoFactor::new(user_uuid.to_string(), TwoFactorType::Authenticator, secret.to_string()),
};
// Get the current system time in UNIX Epoch (UTC)
let current_time: u64 = SystemTime::now().duration_since(UNIX_EPOCH)
.expect("Earlier than 1970-01-01 00:00:00 UTC").as_secs();
// The number of steps to check back and forward in time.
// Also check whether time-drifted TOTP codes need to be disabled.
// If that is the case, we set the steps to 0 so only the current TOTP is valid.
let steps = if CONFIG.authenticator_disable_time_drift() { 0 } else { 1 };
for step in -steps..=steps {
let time_step = (current_time / 30) as i32 + step;
// We need to calculate the time offset and cast it as an i128,
// otherwise we can't do signed math with it against the u64 timestamp.
let time_offset: i128 = (step * 30).into();
let generated = totp_raw_custom_time(&decoded_secret, 6, 0, 30, (current_time as i128 + time_offset) as u64, &HashType::SHA1);
// Check that the given code equals the generated one and that the time_step is larger than the last one used.
if generated == totp_code && time_step > twofactor.last_used {
// If the step does not equal 0, the time has drifted on either the server or the client side.
if step != 0 {
info!("TOTP Time drift detected. The step offset is {}", step);
}
// Save the last used time step so only TOTP time steps higher than this one are allowed.
// This will also save a newly created twofactor if the code is correct.
twofactor.last_used = time_step;
twofactor.save(&conn)?;
return Ok(());
} else if generated == totp_code && time_step <= twofactor.last_used {
warn!("This or a TOTP code within {} steps back and forward has already been used!", steps);
err!("Invalid TOTP Code!");
}
}
// No valid code received, deny access
err!("Invalid TOTP code!");
}
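To make the window logic above concrete, here is a minimal, self-contained sketch of the ±steps check and the last_used replay guard. The TOTP generation itself is stubbed out (fake_totp is a hypothetical placeholder for totp_raw_custom_time); only the step arithmetic is modeled.
// Sketch only: fake_totp stands in for the real HMAC-based generator.
fn fake_totp(time_step: i64) -> u64 {
    (time_step as u64).wrapping_mul(2654435761) % 1_000_000
}
fn code_is_valid(code: u64, current_time: u64, last_used: i64, allow_drift: bool) -> bool {
    // Mirrors the window above: 0 steps when drift is disabled, else +/-1.
    let steps: i64 = if allow_drift { 1 } else { 0 };
    let current_step = (current_time / 30) as i64;
    for step in -steps..=steps {
        let time_step = current_step + step;
        // A code is only accepted from a step newer than the last one used,
        // so a captured code cannot be replayed.
        if fake_totp(time_step) == code && time_step > last_used {
            return true;
        }
    }
    false
}
fn main() {
    let now = 1_573_000_000u64; // fixed timestamp for a reproducible run
    let step = (now / 30) as i64;
    let code = fake_totp(step);
    assert!(code_is_valid(code, now, step - 2, true));
    assert!(!code_is_valid(code, now, step, true)); // step already used: rejected
}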

View File

@@ -0,0 +1,350 @@
use chrono::Utc;
use data_encoding::BASE64;
use rocket::Route;
use rocket_contrib::json::Json;
use serde_json;
use crate::api::core::two_factor::_generate_recover_code;
use crate::api::{ApiResult, EmptyResult, JsonResult, JsonUpcase, PasswordData};
use crate::auth::Headers;
use crate::crypto;
use crate::db::{
models::{TwoFactor, TwoFactorType, User},
DbConn,
};
use crate::error::MapResult;
use crate::CONFIG;
pub fn routes() -> Vec<Route> {
routes![
get_duo,
activate_duo,
activate_duo_put,
]
}
#[derive(Serialize, Deserialize)]
struct DuoData {
host: String,
ik: String,
sk: String,
}
impl DuoData {
fn global() -> Option<Self> {
match (CONFIG._enable_duo(), CONFIG.duo_host()) {
(true, Some(host)) => Some(Self {
host,
ik: CONFIG.duo_ikey().unwrap(),
sk: CONFIG.duo_skey().unwrap(),
}),
_ => None,
}
}
fn msg(s: &str) -> Self {
Self {
host: s.into(),
ik: s.into(),
sk: s.into(),
}
}
fn secret() -> Self {
Self::msg("<global_secret>")
}
fn obscure(self) -> Self {
let mut host = self.host;
let mut ik = self.ik;
let mut sk = self.sk;
let digits = 4;
let replaced = "************";
host.replace_range(digits.., replaced);
ik.replace_range(digits.., replaced);
sk.replace_range(digits.., replaced);
Self { host, ik, sk }
}
}
enum DuoStatus {
Global(DuoData),  // Using the global duo config
User(DuoData),    // Using the user's config
Disabled(bool),   // True if there is a global setting
}
impl DuoStatus {
fn data(self) -> Option<DuoData> {
match self {
DuoStatus::Global(data) => Some(data),
DuoStatus::User(data) => Some(data),
DuoStatus::Disabled(_) => None,
}
}
}
const DISABLED_MESSAGE_DEFAULT: &str = "<To use the global Duo keys, please leave these fields untouched>";
#[post("/two-factor/get-duo", data = "<data>")]
fn get_duo(data: JsonUpcase<PasswordData>, headers: Headers, conn: DbConn) -> JsonResult {
let data: PasswordData = data.into_inner().data;
if !headers.user.check_valid_password(&data.MasterPasswordHash) {
err!("Invalid password");
}
let data = get_user_duo_data(&headers.user.uuid, &conn);
let (enabled, data) = match data {
DuoStatus::Global(_) => (true, Some(DuoData::secret())),
DuoStatus::User(data) => (true, Some(data.obscure())),
DuoStatus::Disabled(true) => (false, Some(DuoData::msg(DISABLED_MESSAGE_DEFAULT))),
DuoStatus::Disabled(false) => (false, None),
};
let json = if let Some(data) = data {
json!({
"Enabled": enabled,
"Host": data.host,
"SecretKey": data.sk,
"IntegrationKey": data.ik,
"Object": "twoFactorDuo"
})
} else {
json!({
"Enabled": enabled,
"Object": "twoFactorDuo"
})
};
Ok(Json(json))
}
#[derive(Deserialize)]
#[allow(non_snake_case, dead_code)]
struct EnableDuoData {
MasterPasswordHash: String,
Host: String,
SecretKey: String,
IntegrationKey: String,
}
impl From<EnableDuoData> for DuoData {
fn from(d: EnableDuoData) -> Self {
Self {
host: d.Host,
ik: d.IntegrationKey,
sk: d.SecretKey,
}
}
}
fn check_duo_fields_custom(data: &EnableDuoData) -> bool {
fn empty_or_default(s: &str) -> bool {
let st = s.trim();
st.is_empty() || s == DISABLED_MESSAGE_DEFAULT
}
!empty_or_default(&data.Host) && !empty_or_default(&data.SecretKey) && !empty_or_default(&data.IntegrationKey)
}
#[post("/two-factor/duo", data = "<data>")]
fn activate_duo(data: JsonUpcase<EnableDuoData>, headers: Headers, conn: DbConn) -> JsonResult {
let data: EnableDuoData = data.into_inner().data;
let mut user = headers.user;
if !user.check_valid_password(&data.MasterPasswordHash) {
err!("Invalid password");
}
let (data, data_str) = if check_duo_fields_custom(&data) {
let data_req: DuoData = data.into();
let data_str = serde_json::to_string(&data_req)?;
duo_api_request("GET", "/auth/v2/check", "", &data_req).map_res("Failed to validate Duo credentials")?;
(data_req.obscure(), data_str)
} else {
(DuoData::secret(), String::new())
};
let type_ = TwoFactorType::Duo;
let twofactor = TwoFactor::new(user.uuid.clone(), type_, data_str);
twofactor.save(&conn)?;
_generate_recover_code(&mut user, &conn);
Ok(Json(json!({
"Enabled": true,
"Host": data.host,
"SecretKey": data.sk,
"IntegrationKey": data.ik,
"Object": "twoFactorDuo"
})))
}
#[put("/two-factor/duo", data = "<data>")]
fn activate_duo_put(data: JsonUpcase<EnableDuoData>, headers: Headers, conn: DbConn) -> JsonResult {
activate_duo(data, headers, conn)
}
fn duo_api_request(method: &str, path: &str, params: &str, data: &DuoData) -> EmptyResult {
const AGENT: &str = "bitwarden_rs:Duo/1.0 (Rust)";
use reqwest::{header::*, Client, Method};
use std::str::FromStr;
let url = format!("https://{}{}", &data.host, path);
let date = Utc::now().to_rfc2822();
let username = &data.ik;
let fields = [&date, method, &data.host, path, params];
let password = crypto::hmac_sign(&data.sk, &fields.join("\n"));
let m = Method::from_str(method).unwrap_or_default();
Client::new()
.request(m, &url)
.basic_auth(username, Some(password))
.header(USER_AGENT, AGENT)
.header(DATE, date)
.send()?
.error_for_status()?;
Ok(())
}
const DUO_EXPIRE: i64 = 300;
const APP_EXPIRE: i64 = 3600;
const AUTH_PREFIX: &str = "AUTH";
const DUO_PREFIX: &str = "TX";
const APP_PREFIX: &str = "APP";
fn get_user_duo_data(uuid: &str, conn: &DbConn) -> DuoStatus {
let type_ = TwoFactorType::Duo as i32;
// If the user doesn't have an entry, it's disabled
let twofactor = match TwoFactor::find_by_user_and_type(uuid, type_, &conn) {
Some(t) => t,
None => return DuoStatus::Disabled(DuoData::global().is_some()),
};
// If the user has the required values, we use those
if let Ok(data) = serde_json::from_str(&twofactor.data) {
return DuoStatus::User(data);
}
// Otherwise, we try to use the globals
if let Some(global) = DuoData::global() {
return DuoStatus::Global(global);
}
// If there are no globals configured, just disable it
DuoStatus::Disabled(false)
}
// let (ik, sk, ak, host) = get_duo_keys();
fn get_duo_keys_email(email: &str, conn: &DbConn) -> ApiResult<(String, String, String, String)> {
let data = User::find_by_mail(email, &conn)
.and_then(|u| get_user_duo_data(&u.uuid, &conn).data())
.or_else(DuoData::global)
.map_res("Can't fetch Duo keys")?;
Ok((data.ik, data.sk, CONFIG.get_duo_akey(), data.host))
}
pub fn generate_duo_signature(email: &str, conn: &DbConn) -> ApiResult<(String, String)> {
let now = Utc::now().timestamp();
let (ik, sk, ak, host) = get_duo_keys_email(email, conn)?;
let duo_sign = sign_duo_values(&sk, email, &ik, DUO_PREFIX, now + DUO_EXPIRE);
let app_sign = sign_duo_values(&ak, email, &ik, APP_PREFIX, now + APP_EXPIRE);
Ok((format!("{}:{}", duo_sign, app_sign), host))
}
fn sign_duo_values(key: &str, email: &str, ikey: &str, prefix: &str, expire: i64) -> String {
let val = format!("{}|{}|{}", email, ikey, expire);
let cookie = format!("{}|{}", prefix, BASE64.encode(val.as_bytes()));
format!("{}|{}", cookie, crypto::hmac_sign(key, &cookie))
}
pub fn validate_duo_login(email: &str, response: &str, conn: &DbConn) -> EmptyResult {
let split: Vec<&str> = response.split(':').collect();
if split.len() != 2 {
err!("Invalid response length");
}
let auth_sig = split[0];
let app_sig = split[1];
let now = Utc::now().timestamp();
let (ik, sk, ak, _host) = get_duo_keys_email(email, conn)?;
let auth_user = parse_duo_values(&sk, auth_sig, &ik, AUTH_PREFIX, now)?;
let app_user = parse_duo_values(&ak, app_sig, &ik, APP_PREFIX, now)?;
if !crypto::ct_eq(&auth_user, app_user) || !crypto::ct_eq(&auth_user, email) {
err!("Error validating duo authentication")
}
Ok(())
}
fn parse_duo_values(key: &str, val: &str, ikey: &str, prefix: &str, time: i64) -> ApiResult<String> {
let split: Vec<&str> = val.split('|').collect();
if split.len() != 3 {
err!("Invalid value length")
}
let u_prefix = split[0];
let u_b64 = split[1];
let u_sig = split[2];
let sig = crypto::hmac_sign(key, &format!("{}|{}", u_prefix, u_b64));
if !crypto::ct_eq(crypto::hmac_sign(key, &sig), crypto::hmac_sign(key, u_sig)) {
err!("Duo signatures don't match")
}
if u_prefix != prefix {
err!("Prefixes don't match")
}
let cookie_vec = match BASE64.decode(u_b64.as_bytes()) {
Ok(c) => c,
Err(_) => err!("Invalid Duo cookie encoding"),
};
let cookie = match String::from_utf8(cookie_vec) {
Ok(c) => c,
Err(_) => err!("Invalid Duo cookie encoding"),
};
let cookie_split: Vec<&str> = cookie.split('|').collect();
if cookie_split.len() != 3 {
err!("Invalid cookie length")
}
let username = cookie_split[0];
let u_ikey = cookie_split[1];
let expire = cookie_split[2];
if !crypto::ct_eq(ikey, u_ikey) {
err!("Invalid ikey")
}
let expire = match expire.parse() {
Ok(e) => e,
Err(_) => err!("Invalid expire time"),
};
if time >= expire {
err!("Expired authorization")
}
Ok(username.into())
}
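As a usage note, the two helpers above form a round trip: sign_duo_values produces PREFIX|base64(email|ikey|expire)|HMAC and parse_duo_values unwinds it. A minimal sketch under stated assumptions: mac is a hypothetical placeholder for crate::crypto::hmac_sign, and plain comparisons replace the constant-time crypto::ct_eq used in the real code.
use data_encoding::BASE64;
// Placeholder "digest": a byte sum in hex. NOT a real MAC, and it must not
// contain '|' so the cookie still splits into exactly three parts.
fn mac(key: &str, msg: &str) -> String {
    let sum: u64 = key.bytes().chain(msg.bytes()).map(u64::from).sum();
    format!("{:016x}", sum)
}
fn sign(key: &str, email: &str, ikey: &str, prefix: &str, expire: i64) -> String {
    let val = format!("{}|{}|{}", email, ikey, expire);
    let cookie = format!("{}|{}", prefix, BASE64.encode(val.as_bytes()));
    format!("{}|{}", cookie, mac(key, &cookie))
}
fn parse(key: &str, val: &str, ikey: &str, prefix: &str, now: i64) -> Result<String, &'static str> {
    let split: Vec<&str> = val.split('|').collect();
    if split.len() != 3 {
        return Err("invalid value length");
    }
    // The real code compares re-signed values with the constant-time ct_eq.
    if mac(key, &format!("{}|{}", split[0], split[1])) != split[2] {
        return Err("signature mismatch");
    }
    if split[0] != prefix {
        return Err("prefix mismatch");
    }
    let decoded = BASE64.decode(split[1].as_bytes()).map_err(|_| "bad base64")?;
    let cookie = String::from_utf8(decoded).map_err(|_| "bad utf8")?;
    let parts: Vec<&str> = cookie.split('|').collect();
    if parts.len() != 3 {
        return Err("invalid cookie length");
    }
    if parts[1] != ikey {
        return Err("ikey mismatch");
    }
    let expire: i64 = parts[2].parse().map_err(|_| "bad expire")?;
    if now >= expire {
        return Err("expired authorization");
    }
    Ok(parts[0].to_string())
}
fn main() {
    let now = 1_573_000_000i64;
    let cookie = sign("akey", "user@example.com", "IK123", "APP", now + 3600);
    assert_eq!(parse("akey", &cookie, "IK123", "APP", now).unwrap(), "user@example.com");
    assert!(parse("akey", &cookie, "IK123", "APP", now + 7200).is_err()); // expired
}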

View File

@@ -0,0 +1,351 @@
use rocket::Route;
use rocket_contrib::json::Json;
use serde_json;
use crate::api::core::two_factor::_generate_recover_code;
use crate::api::{EmptyResult, JsonResult, JsonUpcase, PasswordData};
use crate::auth::Headers;
use crate::crypto;
use crate::db::{
models::{TwoFactor, TwoFactorType},
DbConn,
};
use crate::error::Error;
use crate::mail;
use crate::CONFIG;
use chrono::{Duration, NaiveDateTime, Utc};
use std::ops::Add;
pub fn routes() -> Vec<Route> {
routes![
get_email,
send_email_login,
send_email,
email,
]
}
#[derive(Deserialize)]
#[allow(non_snake_case)]
struct SendEmailLoginData {
Email: String,
MasterPasswordHash: String,
}
/// User is trying to login and wants to use email 2FA.
/// Does not require Bearer token
#[post("/two-factor/send-email-login", data = "<data>")] // JsonResult
fn send_email_login(data: JsonUpcase<SendEmailLoginData>, conn: DbConn) -> EmptyResult {
let data: SendEmailLoginData = data.into_inner().data;
use crate::db::models::User;
// Get the user
let user = match User::find_by_mail(&data.Email, &conn) {
Some(user) => user,
None => err!("Username or password is incorrect. Try again."),
};
// Check password
if !user.check_valid_password(&data.MasterPasswordHash) {
err!("Username or password is incorrect. Try again.")
}
if !CONFIG._enable_email_2fa() {
err!("Email 2FA is disabled")
}
send_token(&user.uuid, &conn)?;
Ok(())
}
/// Generate the token, save the data for later verification and send email to user
pub fn send_token(user_uuid: &str, conn: &DbConn) -> EmptyResult {
let type_ = TwoFactorType::Email as i32;
let mut twofactor = TwoFactor::find_by_user_and_type(user_uuid, type_, &conn)?;
let generated_token = generate_token(CONFIG.email_token_size())?;
let mut twofactor_data = EmailTokenData::from_json(&twofactor.data)?;
twofactor_data.set_token(generated_token);
twofactor.data = twofactor_data.to_json();
twofactor.save(&conn)?;
mail::send_token(&twofactor_data.email, &twofactor_data.last_token?)?;
Ok(())
}
/// When user clicks on Manage email 2FA show the user the related information
#[post("/two-factor/get-email", data = "<data>")]
fn get_email(data: JsonUpcase<PasswordData>, headers: Headers, conn: DbConn) -> JsonResult {
let data: PasswordData = data.into_inner().data;
let user = headers.user;
if !user.check_valid_password(&data.MasterPasswordHash) {
err!("Invalid password");
}
let type_ = TwoFactorType::Email as i32;
let enabled = match TwoFactor::find_by_user_and_type(&user.uuid, type_, &conn) {
Some(x) => x.enabled,
_ => false,
};
Ok(Json(json!({
"Email": user.email,
"Enabled": enabled,
"Object": "twoFactorEmail"
})))
}
#[derive(Deserialize)]
#[allow(non_snake_case)]
struct SendEmailData {
/// Email address where 2FA codes will be sent to; can be different from the user's account email.
Email: String,
MasterPasswordHash: String,
}
fn generate_token(token_size: u32) -> Result<String, Error> {
if token_size > 19 {
err!("Generating token failed")
}
// 8 bytes to create a u64 for up to 19 token digits
let bytes = crypto::get_random(vec![0; 8]);
let mut bytes_array = [0u8; 8];
bytes_array.copy_from_slice(&bytes);
let number = u64::from_be_bytes(bytes_array) % 10u64.pow(token_size);
let token = format!("{:0size$}", number, size = token_size as usize);
Ok(token)
}
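A quick worked example of the derivation above: the 8 random bytes become a u64, which is reduced modulo 10^token_size and zero-padded to a fixed width. The byte values here are hypothetical, fixed only to make the result reproducible.
fn main() {
    // 0x0309 big-endian == 777; stands in for crypto::get_random output.
    let bytes_array: [u8; 8] = [0, 0, 0, 0, 0, 0, 0x03, 0x09];
    let number = u64::from_be_bytes(bytes_array) % 10u64.pow(6);
    let token = format!("{:0size$}", number, size = 6);
    assert_eq!(token, "000777"); // always token_size digits, leading zeros kept
}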
/// Send a verification email to the specified email address to check whether it exists/belongs to user.
#[post("/two-factor/send-email", data = "<data>")]
fn send_email(data: JsonUpcase<SendEmailData>, headers: Headers, conn: DbConn) -> EmptyResult {
let data: SendEmailData = data.into_inner().data;
let user = headers.user;
if !user.check_valid_password(&data.MasterPasswordHash) {
err!("Invalid password");
}
if !CONFIG._enable_email_2fa() {
err!("Email 2FA is disabled")
}
let type_ = TwoFactorType::Email as i32;
if let Some(tf) = TwoFactor::find_by_user_and_type(&user.uuid, type_, &conn) {
tf.delete(&conn)?;
}
let generated_token = generate_token(CONFIG.email_token_size())?;
let twofactor_data = EmailTokenData::new(data.Email, generated_token);
// Uses EmailVerificationChallenge as type to show that it's not verified yet.
let twofactor = TwoFactor::new(
user.uuid,
TwoFactorType::EmailVerificationChallenge,
twofactor_data.to_json(),
);
twofactor.save(&conn)?;
mail::send_token(&twofactor_data.email, &twofactor_data.last_token?)?;
Ok(())
}
#[derive(Deserialize, Serialize)]
#[allow(non_snake_case)]
struct EmailData {
Email: String,
MasterPasswordHash: String,
Token: String,
}
/// Verify email belongs to user and can be used for 2FA email codes.
#[put("/two-factor/email", data = "<data>")]
fn email(data: JsonUpcase<EmailData>, headers: Headers, conn: DbConn) -> JsonResult {
let data: EmailData = data.into_inner().data;
let mut user = headers.user;
if !user.check_valid_password(&data.MasterPasswordHash) {
err!("Invalid password");
}
let type_ = TwoFactorType::EmailVerificationChallenge as i32;
let mut twofactor = TwoFactor::find_by_user_and_type(&user.uuid, type_, &conn)?;
let mut email_data = EmailTokenData::from_json(&twofactor.data)?;
let issued_token = match &email_data.last_token {
Some(t) => t,
_ => err!("No token available"),
};
if !crypto::ct_eq(issued_token, data.Token) {
err!("Token is invalid")
}
email_data.reset_token();
twofactor.atype = TwoFactorType::Email as i32;
twofactor.data = email_data.to_json();
twofactor.save(&conn)?;
_generate_recover_code(&mut user, &conn);
Ok(Json(json!({
"Email": email_data.email,
"Enabled": "true",
"Object": "twoFactorEmail"
})))
}
/// Validate the email code when used as TwoFactor token mechanism
pub fn validate_email_code_str(user_uuid: &str, token: &str, data: &str, conn: &DbConn) -> EmptyResult {
let mut email_data = EmailTokenData::from_json(&data)?;
let mut twofactor = TwoFactor::find_by_user_and_type(&user_uuid, TwoFactorType::Email as i32, &conn)?;
let issued_token = match &email_data.last_token {
Some(t) => t,
_ => err!("No token available"),
};
if !crypto::ct_eq(issued_token, token) {
email_data.add_attempt();
if email_data.attempts >= CONFIG.email_attempts_limit() {
email_data.reset_token();
}
twofactor.data = email_data.to_json();
twofactor.save(&conn)?;
err!("Token is invalid")
}
email_data.reset_token();
twofactor.data = email_data.to_json();
twofactor.save(&conn)?;
let date = NaiveDateTime::from_timestamp(email_data.token_sent, 0);
let max_time = CONFIG.email_expiration_time() as i64;
if date.add(Duration::seconds(max_time)) < Utc::now().naive_utc() {
err!("Token has expired")
}
Ok(())
}
/// Data stored in the TwoFactor table in the db
#[derive(Serialize, Deserialize)]
pub struct EmailTokenData {
/// Email address where the token will be sent to. Can be different from account email.
pub email: String,
/// Some(token): last valid token issued that has not been entered.
/// None: valid token was used and removed.
pub last_token: Option<String>,
/// UNIX timestamp of token issue.
pub token_sent: i64,
/// Amount of token entry attempts for last_token.
pub attempts: u64,
}
impl EmailTokenData {
pub fn new(email: String, token: String) -> EmailTokenData {
EmailTokenData {
email,
last_token: Some(token),
token_sent: Utc::now().naive_utc().timestamp(),
attempts: 0,
}
}
pub fn set_token(&mut self, token: String) {
self.last_token = Some(token);
self.token_sent = Utc::now().naive_utc().timestamp();
}
pub fn reset_token(&mut self) {
self.last_token = None;
self.attempts = 0;
}
pub fn add_attempt(&mut self) {
self.attempts += 1;
}
pub fn to_json(&self) -> String {
serde_json::to_string(&self).unwrap()
}
pub fn from_json(string: &str) -> Result<EmailTokenData, Error> {
let res: Result<EmailTokenData, crate::serde_json::Error> = serde_json::from_str(&string);
match res {
Ok(x) => Ok(x),
Err(_) => err!("Could not decode EmailTokenData from string"),
}
}
}
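A short sketch of the token lifecycle this struct encodes, written as a hypothetical unit test in the style of the tests at the end of this file: a token is issued, entry attempts accumulate against it, and reset_token clears both once it is consumed.
#[test]
fn email_token_lifecycle_sketch() {
    let mut data = EmailTokenData::new("2fa@example.com".into(), "123456".into());
    assert_eq!(data.last_token, Some("123456".to_string()));
    data.add_attempt();
    assert_eq!(data.attempts, 1);
    data.reset_token(); // token consumed: last_token cleared, attempts reset
    assert!(data.last_token.is_none());
    assert_eq!(data.attempts, 0);
}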
/// Takes an email address and obscures the local part with asterisks, keeping at most the first two characters visible.
pub fn obscure_email(email: &str) -> String {
let split: Vec<&str> = email.split('@').collect();
let mut name = split[0].to_string();
let domain = &split[1];
let name_size = name.chars().count();
let new_name = match name_size {
1..=3 => "*".repeat(name_size),
_ => {
let stars = "*".repeat(name_size - 2);
name.truncate(2);
format!("{}{}", name, stars)
}
};
format!("{}@{}", new_name, &domain)
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_obscure_email_long() {
let email = "bytes@example.ext";
let result = obscure_email(&email);
// Only first two characters should be visible.
assert_eq!(result, "by***@example.ext");
}
#[test]
fn test_obscure_email_short() {
let email = "byt@example.ext";
let result = obscure_email(&email);
// If it's 3 characters or fewer it should only show asterisks.
assert_eq!(result, "***@example.ext");
}
#[test]
fn test_token() {
let result = generate_token(19).unwrap();
assert_eq!(result.chars().count(), 19);
}
#[test]
fn test_token_too_large() {
let result = generate_token(20);
assert!(result.is_err(), "a token size larger than 19 should give an error");
}
}

View File

@@ -0,0 +1,146 @@
use data_encoding::BASE32;
use rocket::Route;
use rocket_contrib::json::Json;
use serde_json;
use serde_json::Value;
use crate::api::{JsonResult, JsonUpcase, NumberOrString, PasswordData};
use crate::auth::Headers;
use crate::crypto;
use crate::db::{
models::{TwoFactor, User},
DbConn,
};
pub(crate) mod authenticator;
pub(crate) mod duo;
pub(crate) mod email;
pub(crate) mod u2f;
pub(crate) mod yubikey;
pub fn routes() -> Vec<Route> {
let mut routes = routes![
get_twofactor,
get_recover,
recover,
disable_twofactor,
disable_twofactor_put,
];
routes.append(&mut authenticator::routes());
routes.append(&mut duo::routes());
routes.append(&mut email::routes());
routes.append(&mut u2f::routes());
routes.append(&mut yubikey::routes());
routes
}
#[get("/two-factor")]
fn get_twofactor(headers: Headers, conn: DbConn) -> JsonResult {
let twofactors = TwoFactor::find_by_user(&headers.user.uuid, &conn);
let twofactors_json: Vec<Value> = twofactors.iter().map(TwoFactor::to_json_list).collect();
Ok(Json(json!({
"Data": twofactors_json,
"Object": "list",
"ContinuationToken": null,
})))
}
#[post("/two-factor/get-recover", data = "<data>")]
fn get_recover(data: JsonUpcase<PasswordData>, headers: Headers) -> JsonResult {
let data: PasswordData = data.into_inner().data;
let user = headers.user;
if !user.check_valid_password(&data.MasterPasswordHash) {
err!("Invalid password");
}
Ok(Json(json!({
"Code": user.totp_recover,
"Object": "twoFactorRecover"
})))
}
#[derive(Deserialize)]
#[allow(non_snake_case)]
struct RecoverTwoFactor {
MasterPasswordHash: String,
Email: String,
RecoveryCode: String,
}
#[post("/two-factor/recover", data = "<data>")]
fn recover(data: JsonUpcase<RecoverTwoFactor>, conn: DbConn) -> JsonResult {
let data: RecoverTwoFactor = data.into_inner().data;
use crate::db::models::User;
// Get the user
let mut user = match User::find_by_mail(&data.Email, &conn) {
Some(user) => user,
None => err!("Username or password is incorrect. Try again."),
};
// Check password
if !user.check_valid_password(&data.MasterPasswordHash) {
err!("Username or password is incorrect. Try again.")
}
// Check if recovery code is correct
if !user.check_valid_recovery_code(&data.RecoveryCode) {
err!("Recovery code is incorrect. Try again.")
}
// Remove all twofactors from the user
TwoFactor::delete_all_by_user(&user.uuid, &conn)?;
// Remove the recovery code, not needed without twofactors
user.totp_recover = None;
user.save(&conn)?;
Ok(Json(json!({})))
}
fn _generate_recover_code(user: &mut User, conn: &DbConn) {
if user.totp_recover.is_none() {
let totp_recover = BASE32.encode(&crypto::get_random(vec![0u8; 20]));
user.totp_recover = Some(totp_recover);
user.save(conn).ok();
}
}
#[derive(Deserialize)]
#[allow(non_snake_case)]
struct DisableTwoFactorData {
MasterPasswordHash: String,
Type: NumberOrString,
}
#[post("/two-factor/disable", data = "<data>")]
fn disable_twofactor(data: JsonUpcase<DisableTwoFactorData>, headers: Headers, conn: DbConn) -> JsonResult {
let data: DisableTwoFactorData = data.into_inner().data;
let password_hash = data.MasterPasswordHash;
let user = headers.user;
if !user.check_valid_password(&password_hash) {
err!("Invalid password");
}
let type_ = data.Type.into_i32()?;
if let Some(twofactor) = TwoFactor::find_by_user_and_type(&user.uuid, type_, &conn) {
twofactor.delete(&conn)?;
}
Ok(Json(json!({
"Enabled": false,
"Type": type_,
"Object": "twoFactorProvider"
})))
}
#[put("/two-factor/disable", data = "<data>")]
fn disable_twofactor_put(data: JsonUpcase<DisableTwoFactorData>, headers: Headers, conn: DbConn) -> JsonResult {
disable_twofactor(data, headers, conn)
}

View File

@@ -0,0 +1,315 @@
use rocket::Route;
use rocket_contrib::json::Json;
use serde_json;
use serde_json::Value;
use u2f::messages::{RegisterResponse, SignResponse, U2fSignRequest};
use u2f::protocol::{Challenge, U2f};
use u2f::register::Registration;
use crate::api::core::two_factor::_generate_recover_code;
use crate::api::{ApiResult, EmptyResult, JsonResult, JsonUpcase, NumberOrString, PasswordData};
use crate::auth::Headers;
use crate::db::{
models::{TwoFactor, TwoFactorType},
DbConn,
};
use crate::error::Error;
use crate::CONFIG;
const U2F_VERSION: &str = "U2F_V2";
lazy_static! {
static ref APP_ID: String = format!("{}/app-id.json", &CONFIG.domain());
static ref U2F: U2f = U2f::new(APP_ID.clone());
}
pub fn routes() -> Vec<Route> {
routes![
generate_u2f,
generate_u2f_challenge,
activate_u2f,
activate_u2f_put,
]
}
#[post("/two-factor/get-u2f", data = "<data>")]
fn generate_u2f(data: JsonUpcase<PasswordData>, headers: Headers, conn: DbConn) -> JsonResult {
if !CONFIG.domain_set() {
err!("`DOMAIN` environment variable is not set. U2F disabled")
}
let data: PasswordData = data.into_inner().data;
if !headers.user.check_valid_password(&data.MasterPasswordHash) {
err!("Invalid password");
}
let (enabled, keys) = get_u2f_registrations(&headers.user.uuid, &conn)?;
let keys_json: Vec<Value> = keys.iter().map(U2FRegistration::to_json).collect();
Ok(Json(json!({
"Enabled": enabled,
"Keys": keys_json,
"Object": "twoFactorU2f"
})))
}
#[post("/two-factor/get-u2f-challenge", data = "<data>")]
fn generate_u2f_challenge(data: JsonUpcase<PasswordData>, headers: Headers, conn: DbConn) -> JsonResult {
let data: PasswordData = data.into_inner().data;
if !headers.user.check_valid_password(&data.MasterPasswordHash) {
err!("Invalid password");
}
let _type = TwoFactorType::U2fRegisterChallenge;
let challenge = _create_u2f_challenge(&headers.user.uuid, _type, &conn).challenge;
Ok(Json(json!({
"UserId": headers.user.uuid,
"AppId": APP_ID.to_string(),
"Challenge": challenge,
"Version": U2F_VERSION,
})))
}
#[derive(Deserialize, Debug)]
#[allow(non_snake_case)]
struct EnableU2FData {
Id: NumberOrString, // 1..5
Name: String,
MasterPasswordHash: String,
DeviceResponse: String,
}
// This struct is referenced from the U2F lib
// because it doesn't implement Deserialize
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
#[serde(remote = "Registration")]
struct RegistrationDef {
key_handle: Vec<u8>,
pub_key: Vec<u8>,
attestation_cert: Option<Vec<u8>>,
}
#[derive(Serialize, Deserialize)]
struct U2FRegistration {
id: i32,
name: String,
#[serde(with = "RegistrationDef")]
reg: Registration,
counter: u32,
compromised: bool,
}
impl U2FRegistration {
fn to_json(&self) -> Value {
json!({
"Id": self.id,
"Name": self.name,
"Compromised": self.compromised,
})
}
}
// This struct is copied from the U2F lib
// to add an optional error code
#[derive(Deserialize)]
#[serde(rename_all = "camelCase")]
struct RegisterResponseCopy {
pub registration_data: String,
pub version: String,
pub client_data: String,
pub error_code: Option<NumberOrString>,
}
impl Into<RegisterResponse> for RegisterResponseCopy {
fn into(self) -> RegisterResponse {
RegisterResponse {
registration_data: self.registration_data,
version: self.version,
client_data: self.client_data,
}
}
}
#[post("/two-factor/u2f", data = "<data>")]
fn activate_u2f(data: JsonUpcase<EnableU2FData>, headers: Headers, conn: DbConn) -> JsonResult {
let data: EnableU2FData = data.into_inner().data;
let mut user = headers.user;
if !user.check_valid_password(&data.MasterPasswordHash) {
err!("Invalid password");
}
let tf_type = TwoFactorType::U2fRegisterChallenge as i32;
let tf_challenge = match TwoFactor::find_by_user_and_type(&user.uuid, tf_type, &conn) {
Some(c) => c,
None => err!("Can't recover challenge"),
};
let challenge: Challenge = serde_json::from_str(&tf_challenge.data)?;
tf_challenge.delete(&conn)?;
let response: RegisterResponseCopy = serde_json::from_str(&data.DeviceResponse)?;
let error_code = response
.error_code
.clone()
.map_or("0".into(), NumberOrString::into_string);
if error_code != "0" {
err!("Error registering U2F token")
}
let registration = U2F.register_response(challenge, response.into())?;
let full_registration = U2FRegistration {
id: data.Id.into_i32()?,
name: data.Name,
reg: registration,
compromised: false,
counter: 0,
};
let mut regs = get_u2f_registrations(&user.uuid, &conn)?.1;
// TODO: Check that there is no repeat Id
regs.push(full_registration);
save_u2f_registrations(&user.uuid, &regs, &conn)?;
_generate_recover_code(&mut user, &conn);
let keys_json: Vec<Value> = regs.iter().map(U2FRegistration::to_json).collect();
Ok(Json(json!({
"Enabled": true,
"Keys": keys_json,
"Object": "twoFactorU2f"
})))
}
#[put("/two-factor/u2f", data = "<data>")]
fn activate_u2f_put(data: JsonUpcase<EnableU2FData>, headers: Headers, conn: DbConn) -> JsonResult {
activate_u2f(data, headers, conn)
}
fn _create_u2f_challenge(user_uuid: &str, type_: TwoFactorType, conn: &DbConn) -> Challenge {
let challenge = U2F.generate_challenge().unwrap();
TwoFactor::new(user_uuid.into(), type_, serde_json::to_string(&challenge).unwrap())
.save(conn)
.expect("Error saving challenge");
challenge
}
fn save_u2f_registrations(user_uuid: &str, regs: &[U2FRegistration], conn: &DbConn) -> EmptyResult {
TwoFactor::new(user_uuid.into(), TwoFactorType::U2f, serde_json::to_string(regs)?).save(&conn)
}
fn get_u2f_registrations(user_uuid: &str, conn: &DbConn) -> Result<(bool, Vec<U2FRegistration>), Error> {
let type_ = TwoFactorType::U2f as i32;
let (enabled, regs) = match TwoFactor::find_by_user_and_type(user_uuid, type_, conn) {
Some(tf) => (tf.enabled, tf.data),
None => return Ok((false, Vec::new())), // If no data, return empty list
};
let data = match serde_json::from_str(&regs) {
Ok(d) => d,
Err(_) => {
// If error, try old format
let mut old_regs = _old_parse_registrations(&regs);
if old_regs.len() != 1 {
err!("The old U2F format only allows one device")
}
// Convert to new format
let new_regs = vec![U2FRegistration {
id: 1,
name: "Unnamed U2F key".into(),
reg: old_regs.remove(0),
compromised: false,
counter: 0,
}];
// Save new format
save_u2f_registrations(user_uuid, &new_regs, &conn)?;
new_regs
}
};
Ok((enabled, data))
}
fn _old_parse_registrations(registrations: &str) -> Vec<Registration> {
#[derive(Deserialize)]
struct Helper(#[serde(with = "RegistrationDef")] Registration);
let regs: Vec<Value> = serde_json::from_str(registrations).expect("Can't parse Registration data");
regs.into_iter()
.map(|r| serde_json::from_value(r).unwrap())
.map(|Helper(r)| r)
.collect()
}
pub fn generate_u2f_login(user_uuid: &str, conn: &DbConn) -> ApiResult<U2fSignRequest> {
let challenge = _create_u2f_challenge(user_uuid, TwoFactorType::U2fLoginChallenge, conn);
let registrations: Vec<_> = get_u2f_registrations(user_uuid, conn)?
.1
.into_iter()
.map(|r| r.reg)
.collect();
if registrations.is_empty() {
err!("No U2F devices registered")
}
Ok(U2F.sign_request(challenge, registrations))
}
pub fn validate_u2f_login(user_uuid: &str, response: &str, conn: &DbConn) -> EmptyResult {
let challenge_type = TwoFactorType::U2fLoginChallenge as i32;
let tf_challenge = TwoFactor::find_by_user_and_type(user_uuid, challenge_type, &conn);
let challenge = match tf_challenge {
Some(tf_challenge) => {
let challenge: Challenge = serde_json::from_str(&tf_challenge.data)?;
tf_challenge.delete(&conn)?;
challenge
}
None => err!("Can't recover login challenge"),
};
let response: SignResponse = serde_json::from_str(response)?;
let mut registrations = get_u2f_registrations(user_uuid, conn)?.1;
if registrations.is_empty() {
err!("No U2F devices registered")
}
for reg in &mut registrations {
let response = U2F.sign_response(challenge.clone(), reg.reg.clone(), response.clone(), reg.counter);
match response {
Ok(new_counter) => {
reg.counter = new_counter;
save_u2f_registrations(user_uuid, &registrations, &conn)?;
return Ok(());
}
Err(u2f::u2ferror::U2fError::CounterTooLow) => {
reg.compromised = true;
save_u2f_registrations(user_uuid, &registrations, &conn)?;
err!("This device might be compromised!");
}
Err(e) => {
warn!("E {:#}", e);
// break;
}
}
}
err!("error verifying response")
}
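// Editor's note: the CounterTooLow branch above is U2F's clone detection. A
// genuine token increments its signature counter on every use, so a response
// whose counter is not above the stored value suggests a cloned key, and the
// registration is flagged as compromised instead of accepted.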

View File

@@ -0,0 +1,198 @@
use rocket::Route;
use rocket_contrib::json::Json;
use serde_json;
use serde_json::Value;
use yubico::config::Config;
use yubico::verify;
use crate::api::core::two_factor::_generate_recover_code;
use crate::api::{EmptyResult, JsonResult, JsonUpcase, PasswordData};
use crate::auth::Headers;
use crate::db::{
models::{TwoFactor, TwoFactorType},
DbConn,
};
use crate::error::{Error, MapResult};
use crate::CONFIG;
pub fn routes() -> Vec<Route> {
routes![
generate_yubikey,
activate_yubikey,
activate_yubikey_put,
]
}
#[derive(Deserialize, Debug)]
#[allow(non_snake_case)]
struct EnableYubikeyData {
MasterPasswordHash: String,
Key1: Option<String>,
Key2: Option<String>,
Key3: Option<String>,
Key4: Option<String>,
Key5: Option<String>,
Nfc: bool,
}
#[derive(Deserialize, Serialize, Debug)]
#[allow(non_snake_case)]
pub struct YubikeyMetadata {
Keys: Vec<String>,
pub Nfc: bool,
}
fn parse_yubikeys(data: &EnableYubikeyData) -> Vec<String> {
let data_keys = [&data.Key1, &data.Key2, &data.Key3, &data.Key4, &data.Key5];
data_keys.iter().filter_map(|e| e.as_ref().cloned()).collect()
}
fn jsonify_yubikeys(yubikeys: Vec<String>) -> serde_json::Value {
let mut result = json!({});
for (i, key) in yubikeys.into_iter().enumerate() {
result[format!("Key{}", i + 1)] = Value::String(key);
}
result
}
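// Editor's note: a sketch of the mapping (hypothetical key IDs):
//   jsonify_yubikeys(vec!["ccccccbbbbbb".into(), "ccccccdddddd".into()])
//   == json!({ "Key1": "ccccccbbbbbb", "Key2": "ccccccdddddd" })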
fn get_yubico_credentials() -> Result<(String, String), Error> {
if !CONFIG._enable_yubico() {
err!("Yubico support is disabled");
}
match (CONFIG.yubico_client_id(), CONFIG.yubico_secret_key()) {
(Some(id), Some(secret)) => Ok((id, secret)),
_ => err!("`YUBICO_CLIENT_ID` or `YUBICO_SECRET_KEY` environment variable is not set. Yubikey OTP Disabled"),
}
}
fn verify_yubikey_otp(otp: String) -> EmptyResult {
let (yubico_id, yubico_secret) = get_yubico_credentials()?;
let config = Config::default().set_client_id(yubico_id).set_key(yubico_secret);
match CONFIG.yubico_server() {
Some(server) => verify(otp, config.set_api_hosts(vec![server])),
None => verify(otp, config),
}
.map_res("Failed to verify OTP")
.and(Ok(()))
}
#[post("/two-factor/get-yubikey", data = "<data>")]
fn generate_yubikey(data: JsonUpcase<PasswordData>, headers: Headers, conn: DbConn) -> JsonResult {
// Make sure the credentials are set
get_yubico_credentials()?;
let data: PasswordData = data.into_inner().data;
let user = headers.user;
if !user.check_valid_password(&data.MasterPasswordHash) {
err!("Invalid password");
}
let user_uuid = &user.uuid;
let yubikey_type = TwoFactorType::YubiKey as i32;
let r = TwoFactor::find_by_user_and_type(user_uuid, yubikey_type, &conn);
if let Some(r) = r {
let yubikey_metadata: YubikeyMetadata = serde_json::from_str(&r.data)?;
let mut result = jsonify_yubikeys(yubikey_metadata.Keys);
result["Enabled"] = Value::Bool(true);
result["Nfc"] = Value::Bool(yubikey_metadata.Nfc);
result["Object"] = Value::String("twoFactorU2f".to_owned());
Ok(Json(result))
} else {
Ok(Json(json!({
"Enabled": false,
"Object": "twoFactorU2f",
})))
}
}
#[post("/two-factor/yubikey", data = "<data>")]
fn activate_yubikey(data: JsonUpcase<EnableYubikeyData>, headers: Headers, conn: DbConn) -> JsonResult {
let data: EnableYubikeyData = data.into_inner().data;
let mut user = headers.user;
if !user.check_valid_password(&data.MasterPasswordHash) {
err!("Invalid password");
}
// Check if we already have some data
let mut yubikey_data = match TwoFactor::find_by_user_and_type(&user.uuid, TwoFactorType::YubiKey as i32, &conn) {
Some(data) => data,
None => TwoFactor::new(user.uuid.clone(), TwoFactorType::YubiKey, String::new()),
};
let yubikeys = parse_yubikeys(&data);
if yubikeys.is_empty() {
return Ok(Json(json!({
"Enabled": false,
"Object": "twoFactorU2f",
})));
}
// Ensure they are valid OTPs
for yubikey in &yubikeys {
if yubikey.len() == 12 {
// YubiKey ID
continue;
}
verify_yubikey_otp(yubikey.to_owned()).map_res("Invalid Yubikey OTP provided")?;
}
let yubikey_ids: Vec<String> = yubikeys.into_iter().map(|x| (&x[..12]).to_owned()).collect();
let yubikey_metadata = YubikeyMetadata {
Keys: yubikey_ids,
Nfc: data.Nfc,
};
yubikey_data.data = serde_json::to_string(&yubikey_metadata).unwrap();
yubikey_data.save(&conn)?;
_generate_recover_code(&mut user, &conn);
let mut result = jsonify_yubikeys(yubikey_metadata.Keys);
result["Enabled"] = Value::Bool(true);
result["Nfc"] = Value::Bool(yubikey_metadata.Nfc);
result["Object"] = Value::String("twoFactorU2f".to_owned());
Ok(Json(result))
}
#[put("/two-factor/yubikey", data = "<data>")]
fn activate_yubikey_put(data: JsonUpcase<EnableYubikeyData>, headers: Headers, conn: DbConn) -> JsonResult {
activate_yubikey(data, headers, conn)
}
pub fn validate_yubikey_login(response: &str, twofactor_data: &str) -> EmptyResult {
if response.len() != 44 {
err!("Invalid Yubikey OTP length");
}
let yubikey_metadata: YubikeyMetadata = serde_json::from_str(twofactor_data).expect("Can't parse Yubikey Metadata");
let response_id = &response[..12];
if !yubikey_metadata.Keys.contains(&response_id.to_owned()) {
err!("Given Yubikey is not registered");
}
let result = verify_yubikey_otp(response.to_owned());
match result {
Ok(_answer) => Ok(()),
Err(_e) => err!("Failed to verify Yubikey against OTP server"),
}
}
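// Editor's note: a standard Yubikey OTP is 44 modhex characters: a stable
// 12-character public ID followed by a 32-character one-time passcode. That
// is why registrations are keyed off response[..12] and any response that is
// not exactly 44 characters is rejected before the OTP server is consulted.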

View File

@@ -1,14 +1,15 @@
use std::fs::{create_dir_all, remove_file, symlink_metadata, File};
use std::io::prelude::*;
use std::net::ToSocketAddrs;
use std::time::{Duration, SystemTime};
use rocket::http::ContentType;
use rocket::response::Content;
use rocket::Route;
use reqwest::{header::HeaderMap, Client, Response};
use reqwest::{header::HeaderMap, Client, Response, Url};
use rocket::http::{Cookie};
use rocket::http::Cookie;
use regex::Regex;
use soup::prelude::*;
@@ -22,28 +23,74 @@ pub fn routes() -> Vec<Route> {
const FALLBACK_ICON: &[u8; 344] = include_bytes!("../static/fallback-icon.png");
const ALLOWED_CHARS: &str = "_-.";
lazy_static! {
// Reuse the client between requests
static ref CLIENT: Client = Client::builder()
.use_sys_proxy()
.gzip(true)
.timeout(Duration::from_secs(5))
.timeout(Duration::from_secs(CONFIG.icon_download_timeout()))
.default_headers(_header_map())
.build()
.unwrap();
}
fn is_valid_domain(domain: &str) -> bool {
// Don't allow empty domains, overly long domains, or path traversal
if domain.is_empty() || domain.len() > 255 || domain.contains("..") {
return false;
}
// Only alphanumeric or specific characters
for c in domain.chars() {
if !c.is_alphanumeric() && !ALLOWED_CHARS.contains(c) {
return false;
}
}
true
}
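// Editor's note: a minimal test sketch (not part of this diff) illustrating
// what is_valid_domain accepts and rejects:
#[cfg(test)]
mod is_valid_domain_tests {
use super::is_valid_domain;
#[test]
fn accepts_plain_domains_and_rejects_traversal() {
assert!(is_valid_domain("example.com"));
assert!(is_valid_domain("sub-domain.example.com"));
assert!(!is_valid_domain("")); // empty
assert!(!is_valid_domain("../etc/passwd")); // ".." and '/' are rejected
assert!(!is_valid_domain("exa mple.com")); // ' ' is not in ALLOWED_CHARS
}
}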
#[get("/<domain>/icon.png")]
fn icon(domain: String) -> Content<Vec<u8>> {
let icon_type = ContentType::new("image", "x-icon");
// Validate the domain to avoid directory traversal attacks
if domain.contains('/') || domain.contains("..") {
if !is_valid_domain(&domain) {
warn!("Invalid domain: {:#?}", domain);
return Content(icon_type, FALLBACK_ICON.to_vec());
}
let icon = get_icon(&domain);
Content(icon_type, get_icon(&domain))
}
Content(icon_type, icon)
fn check_icon_domain_is_blacklisted(domain: &str) -> bool {
let mut is_blacklisted = CONFIG.icon_blacklist_non_global_ips()
&& (domain, 0)
.to_socket_addrs()
.map(|x| {
for ip_port in x {
if !ip_port.ip().is_global() {
warn!("IP {} for domain '{}' is not a global IP!", ip_port.ip(), domain);
return true;
}
}
false
})
.unwrap_or(false);
// Skip the regex check if the previous check already flagged the domain
if !is_blacklisted {
if let Some(blacklist) = CONFIG.icon_blacklist_regex() {
let regex = Regex::new(&blacklist).expect("Valid Regex");
if regex.is_match(&domain) {
warn!("Blacklisted domain: {:#?} matched {:#?}", domain, blacklist);
is_blacklisted = true;
}
}
}
is_blacklisted
}
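// Editor's note: the non-global IP check above is an SSRF guard: it resolves
// the domain and refuses to fetch icons from hosts that map to loopback,
// private, or otherwise non-global addresses (std's is_global was a
// nightly-only API at the time of this change).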
fn get_icon(domain: &str) -> Vec<u8> {
@@ -65,7 +112,9 @@ fn get_icon(domain: &str) -> Vec<u8> {
}
Err(e) => {
error!("Error downloading icon: {:?}", e);
mark_negcache(&path);
let miss_indicator = path + ".miss";
let empty_icon = Vec::new();
save_icon(&miss_indicator, &empty_icon);
FALLBACK_ICON.to_vec()
}
}
@@ -121,22 +170,23 @@ fn icon_is_negcached(path: &str) -> bool {
}
}
fn mark_negcache(path: &str) {
let miss_indicator = path.to_owned() + ".miss";
File::create(&miss_indicator).expect("Error creating negative cache marker");
}
fn icon_is_expired(path: &str) -> bool {
let expired = file_is_expired(path, CONFIG.icon_cache_ttl());
expired.unwrap_or(true)
}
#[derive(Debug)]
struct IconList {
struct Icon {
priority: u8,
href: String,
}
impl Icon {
fn new(priority: u8, href: String) -> Self {
Self { href, priority }
}
}
/// Returns a Result holding a tuple of a Vec of Icons and a String with the cookies from the last response.
/// The list will always contain at least https://example.com/favicon.ico, paired with an empty cookie string.
/// This does not mean that the location actually exists, but it is the default location browsers use.
@@ -149,13 +199,13 @@ struct IconList {
/// let (mut iconlist, cookie_str) = get_icon_url("github.com")?;
/// let (mut iconlist, cookie_str) = get_icon_url("gitlab.com")?;
/// ```
fn get_icon_url(domain: &str) -> Result<(Vec<IconList>, String), Error> {
fn get_icon_url(domain: &str) -> Result<(Vec<Icon>, String), Error> {
// Default URL with secure and insecure schemes
let ssldomain = format!("https://{}", domain);
let httpdomain = format!("http://{}", domain);
// Create the iconlist
let mut iconlist: Vec<IconList> = Vec::new();
let mut iconlist: Vec<Icon> = Vec::new();
// Create the cookie_str to hold all the cookies from the response
// These cookies can be used to request/download the favicon image.
@@ -166,14 +216,22 @@ fn get_icon_url(domain: &str) -> Result<(Vec<IconList>, String), Error> {
if let Ok(content) = resp {
// Extract the URL from the response in case redirects occurred (like @ gitlab.com)
let url = content.url().clone();
let raw_cookies = content.headers().get_all("set-cookie");
cookie_str = raw_cookies.iter().map(|raw_cookie| {
let cookie = Cookie::parse(raw_cookie.to_str().unwrap_or_default()).unwrap();
format!("{}={}; ", cookie.name(), cookie.value())
}).collect::<String>();
cookie_str = raw_cookies
.iter()
.filter_map(|raw_cookie| raw_cookie.to_str().ok())
.map(|cookie_str| {
if let Ok(cookie) = Cookie::parse(cookie_str) {
format!("{}={}; ", cookie.name(), cookie.value())
} else {
String::new()
}
})
.collect::<String>();
// Add the default favicon.ico to the list, using the domain the content was actually served from.
iconlist.push(IconList { priority: 35, href: url.join("/favicon.ico").unwrap().into_string() });
iconlist.push(Icon::new(35, url.join("/favicon.ico").unwrap().into_string()));
let soup = Soup::from_reader(content)?;
// Search for and filter the <link rel> icon elements
@@ -185,15 +243,18 @@ fn get_icon_url(domain: &str) -> Result<(Vec<IconList>, String), Error> {
// Loop through all the found icons and determine their priority
for favicon in favicons {
let sizes = favicon.get("sizes").unwrap_or_default();
let href = url.join(&favicon.get("href").unwrap_or_default()).unwrap().into_string();
let priority = get_icon_priority(&href, &sizes);
let sizes = favicon.get("sizes");
let href = favicon.get("href").expect("Missing href");
let full_href = url.join(&href).unwrap().into_string();
iconlist.push(IconList { priority, href })
let priority = get_icon_priority(&full_href, sizes);
iconlist.push(Icon::new(priority, full_href))
}
} else {
// Add the default favicon.ico to the list with just the given domain
iconlist.push(IconList { priority: 35, href: format!("{}/favicon.ico", ssldomain) });
iconlist.push(Icon::new(35, format!("{}/favicon.ico", ssldomain)));
iconlist.push(Icon::new(35, format!("{}/favicon.ico", httpdomain)));
}
// Sort the iconlist by priority
@@ -204,12 +265,24 @@ fn get_icon_url(domain: &str) -> Result<(Vec<IconList>, String), Error> {
}
fn get_page(url: &str) -> Result<Response, Error> {
//CLIENT.get(url).send()?.error_for_status().map_err(Into::into)
get_page_with_cookies(url, "")
}
fn get_page_with_cookies(url: &str, cookie_str: &str) -> Result<Response, Error> {
CLIENT.get(url).header("cookie", cookie_str).send()?.error_for_status().map_err(Into::into)
if check_icon_domain_is_blacklisted(Url::parse(url).unwrap().host_str().unwrap_or_default()) {
err!("Favicon rel linked to a non blacklisted domain!");
}
if cookie_str.is_empty() {
CLIENT.get(url).send()?.error_for_status().map_err(Into::into)
} else {
CLIENT
.get(url)
.header("cookie", cookie_str)
.send()?
.error_for_status()
.map_err(Into::into)
}
}
/// Returns an integer with the priority of the icon type, used to decide which icon to prefer.
@@ -224,7 +297,7 @@ fn get_page_with_cookies(url: &str, cookie_str: &str) -> Result<Response, Error>
/// priority1 = get_icon_priority("http://example.com/path/to/a/favicon.png", "32x32");
/// priority2 = get_icon_priority("https://example.com/path/to/a/favicon.ico", "");
/// ```
fn get_icon_priority(href: &str, sizes: &str) -> u8 {
fn get_icon_priority(href: &str, sizes: Option<String>) -> u8 {
// Check if there is a dimension set
let (width, height) = parse_sizes(sizes);
@@ -272,19 +345,19 @@ fn get_icon_priority(href: &str, sizes: &str) -> u8 {
/// let (width, height) = parse_sizes("x128x128"); // (128, 128)
/// let (width, height) = parse_sizes("32"); // (0, 0)
/// ```
fn parse_sizes(sizes: &str) -> (u16, u16) {
fn parse_sizes(sizes: Option<String>) -> (u16, u16) {
let mut width: u16 = 0;
let mut height: u16 = 0;
if !sizes.is_empty() {
if let Some(sizes) = sizes {
match Regex::new(r"(?x)(\d+)\D*(\d+)").unwrap().captures(sizes.trim()) {
None => {},
None => {}
Some(dimensions) => {
if dimensions.len() >= 3 {
width = dimensions[1].parse::<u16>().unwrap_or_default();
height = dimensions[2].parse::<u16>().unwrap_or_default();
}
},
}
}
}
@@ -292,21 +365,22 @@ fn parse_sizes(sizes: &str) -> (u16, u16) {
}
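// Editor's note: illustrative inputs for the new Option-based signature,
// mirroring the doc examples above:
//   parse_sizes(Some("64x64".to_string()))   == (64, 64)
//   parse_sizes(Some(" 96X96 ".to_string())) == (96, 96) // input is trimmed
//   parse_sizes(None)                        == (0, 0)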
fn download_icon(domain: &str) -> Result<Vec<u8>, Error> {
let (mut iconlist, cookie_str) = get_icon_url(&domain)?;
if check_icon_domain_is_blacklisted(domain) {
err!("Domain is blacklisted", domain)
}
let (iconlist, cookie_str) = get_icon_url(&domain)?;
let mut buffer = Vec::new();
iconlist.truncate(5);
for icon in iconlist {
let url = icon.href;
info!("Downloading icon for {} via {}...", domain, url);
match get_page_with_cookies(&url, &cookie_str) {
for icon in iconlist.iter().take(5) {
match get_page_with_cookies(&icon.href, &cookie_str) {
Ok(mut res) => {
info!("Download finished for {}", url);
info!("Downloaded icon from {}", icon.href);
res.copy_to(&mut buffer)?;
break;
},
Err(_) => info!("Download failed for {}", url),
}
Err(_) => info!("Download failed for {}", icon.href),
};
}
@@ -318,11 +392,17 @@ fn download_icon(domain: &str) -> Result<Vec<u8>, Error> {
}
fn save_icon(path: &str, icon: &[u8]) {
create_dir_all(&CONFIG.icon_cache_folder()).expect("Error creating icon cache");
if let Ok(mut f) = File::create(path) {
f.write_all(icon).expect("Error writing icon file");
};
match File::create(path) {
Ok(mut f) => {
f.write_all(icon).expect("Error writing icon file");
}
Err(ref e) if e.kind() == std::io::ErrorKind::NotFound => {
create_dir_all(&CONFIG.icon_cache_folder()).expect("Error creating icon cache");
}
Err(e) => {
info!("Icon save error: {:?}", e);
}
}
}
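// Editor's note: on ErrorKind::NotFound the cache directory is (re)created
// but the write is not retried, so the icon that hit the missing directory is
// dropped for this request; the next request for it succeeds because the
// directory now exists.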
fn _header_map() -> HeaderMap {

View File

@@ -1,20 +1,17 @@
use num_traits::FromPrimitive;
use rocket::request::{Form, FormItems, FromForm};
use rocket::Route;
use rocket_contrib::json::Json;
use serde_json::Value;
use num_traits::FromPrimitive;
use crate::api::core::two_factor::email::EmailTokenData;
use crate::api::core::two_factor::{duo, email, yubikey};
use crate::api::{ApiResult, EmptyResult, JsonResult};
use crate::auth::ClientIp;
use crate::db::models::*;
use crate::db::DbConn;
use crate::util::{self, JsonMap};
use crate::api::{ApiResult, EmptyResult, JsonResult};
use crate::auth::ClientIp;
use crate::mail;
use crate::util;
use crate::CONFIG;
pub fn routes() -> Vec<Route> {
@@ -68,7 +65,7 @@ fn _refresh_login(data: ConnectData, conn: DbConn) -> JsonResult {
"expires_in": expires_in,
"token_type": "Bearer",
"refresh_token": device.refresh_token,
"Key": user.key,
"Key": user.akey,
"PrivateKey": user.private_key,
})))
}
@@ -99,26 +96,19 @@ fn _password_login(data: ConnectData, conn: DbConn, ip: ClientIp) -> JsonResult
)
}
// On iOS, device_type sends "iOS", on others it sends a number
let device_type = util::try_parse_string(data.device_type.as_ref()).unwrap_or(0);
let device_id = data.device_identifier.clone().expect("No device id provided");
let device_name = data.device_name.clone().expect("No device name provided");
let (mut device, new_device) = get_device(&data, &conn, &user);
// Find device or create new
let mut device = match Device::find_by_uuid(&device_id, &conn) {
Some(device) => {
// Check if owned device, and recreate if not
if device.user_uuid != user.uuid {
info!("Device exists but is owned by another user. The old device will be discarded");
Device::new(device_id, user.uuid.clone(), device_name, device_type)
} else {
device
let twofactor_token = twofactor_auth(&user.uuid, &data, &mut device, &conn)?;
if CONFIG.mail_enabled() && new_device {
if let Err(e) = mail::send_new_device_logged_in(&user.email, &ip.ip.to_string(), &device.updated_at, &device.name) {
error!("Error sending new device email: {:#?}", e);
if CONFIG.require_device_email() {
err!("Could not send login notification email. Please contact your administrator.")
}
}
None => Device::new(device_id, user.uuid.clone(), device_name, device_type),
};
let twofactor_token = twofactor_auth(&user.uuid, &data.clone(), &mut device, &conn)?;
}
// Common
let user = User::find_by_uuid(&device.user_uuid, &conn).unwrap();
@@ -132,7 +122,7 @@ fn _password_login(data: ConnectData, conn: DbConn, ip: ClientIp) -> JsonResult
"expires_in": expires_in,
"token_type": "Bearer",
"refresh_token": device.refresh_token,
"Key": user.key,
"Key": user.akey,
"PrivateKey": user.private_key,
//"TwoFactorToken": "11122233333444555666777888999"
});
@@ -145,6 +135,35 @@ fn _password_login(data: ConnectData, conn: DbConn, ip: ClientIp) -> JsonResult
Ok(Json(result))
}
/// Retrieves an existing device or creates a new device from ConnectData and the User
fn get_device(data: &ConnectData, conn: &DbConn, user: &User) -> (Device, bool) {
// On iOS, device_type sends "iOS", on others it sends a number
let device_type = util::try_parse_string(data.device_type.as_ref()).unwrap_or(0);
let device_id = data.device_identifier.clone().expect("No device id provided");
let device_name = data.device_name.clone().expect("No device name provided");
let mut new_device = false;
// Find device or create new
let device = match Device::find_by_uuid(&device_id, &conn) {
Some(device) => {
// Check if owned device, and recreate if not
if device.user_uuid != user.uuid {
info!("Device exists but is owned by another user. The old device will be discarded");
new_device = true;
Device::new(device_id, user.uuid.clone(), device_name, device_type)
} else {
device
}
}
None => {
new_device = true;
Device::new(device_id, user.uuid.clone(), device_name, device_type)
}
};
(device, new_device)
}
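// Editor's note: get_device returns a (Device, bool) pair rather than just
// the device; the bool flags "this is a device we haven't seen for this user"
// (not found, or recreated because it belonged to another user) and drives the
// new-device login notification email in _password_login above.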
fn twofactor_auth(
user_uuid: &str,
data: &ConnectData,
@@ -152,62 +171,50 @@ fn twofactor_auth(
conn: &DbConn,
) -> ApiResult<Option<String>> {
let twofactors = TwoFactor::find_by_user(user_uuid, conn);
let providers: Vec<_> = twofactors.iter().map(|tf| tf.type_).collect();
// No twofactor token if twofactor is disabled
if twofactors.is_empty() {
return Ok(None);
}
let provider = data.two_factor_provider.unwrap_or(providers[0]); // If we aren't given a two factor provider, assume the first one
let twofactor_ids: Vec<_> = twofactors.iter().map(|tf| tf.atype).collect();
let selected_id = data.two_factor_provider.unwrap_or(twofactor_ids[0]); // If we aren't given a two factor provider, assume the first one
let twofactor_code = match data.two_factor_token {
Some(ref code) => code,
None => err_json!(_json_err_twofactor(&providers, user_uuid, conn)?),
None => err_json!(_json_err_twofactor(&twofactor_ids, user_uuid, conn)?),
};
let twofactor = twofactors.iter().filter(|tf| tf.type_ == provider).nth(0);
let selected_twofactor = twofactors
.into_iter()
.filter(|tf| tf.atype == selected_id && tf.enabled)
.nth(0);
use crate::api::core::two_factor as _tf;
use crate::crypto::ct_eq;
let selected_data = _selected_data(selected_twofactor);
let mut remember = data.two_factor_remember.unwrap_or(0);
match TwoFactorType::from_i32(selected_id) {
Some(TwoFactorType::Authenticator) => _tf::authenticator::validate_totp_code_str(user_uuid, twofactor_code, &selected_data?, conn)?,
Some(TwoFactorType::U2f) => _tf::u2f::validate_u2f_login(user_uuid, twofactor_code, conn)?,
Some(TwoFactorType::YubiKey) => _tf::yubikey::validate_yubikey_login(twofactor_code, &selected_data?)?,
Some(TwoFactorType::Duo) => _tf::duo::validate_duo_login(data.username.as_ref().unwrap(), twofactor_code, conn)?,
Some(TwoFactorType::Email) => _tf::email::validate_email_code_str(user_uuid, twofactor_code, &selected_data?, conn)?,
match TwoFactorType::from_i32(provider) {
Some(TwoFactorType::Remember) => {
match device.twofactor_remember {
Some(ref remember) if remember == twofactor_code => return Ok(None), // No twofactor token needed here
_ => err_json!(_json_err_twofactor(&providers, user_uuid, conn)?),
Some(ref code) if !CONFIG.disable_2fa_remember() && ct_eq(code, twofactor_code) => {
remember = 1; // Make sure we also return the token here, otherwise it will only remember the first time
}
_ => err_json!(_json_err_twofactor(&twofactor_ids, user_uuid, conn)?),
}
}
Some(TwoFactorType::Authenticator) => {
let twofactor = match twofactor {
Some(tf) => tf,
None => err!("TOTP not enabled"),
};
let totp_code: u64 = match twofactor_code.parse() {
Ok(code) => code,
_ => err!("Invalid TOTP code"),
};
if !twofactor.check_totp_code(totp_code) {
err_json!(_json_err_twofactor(&providers, user_uuid, conn)?)
}
}
Some(TwoFactorType::U2f) => {
use crate::api::core::two_factor;
two_factor::validate_u2f_login(user_uuid, &twofactor_code, conn)?;
}
Some(TwoFactorType::YubiKey) => {
use crate::api::core::two_factor;
two_factor::validate_yubikey_login(user_uuid, twofactor_code, conn)?;
}
_ => err!("Invalid two factor provider"),
}
if data.two_factor_remember.unwrap_or(0) == 1 {
if !CONFIG.disable_2fa_remember() && remember == 1 {
Ok(Some(device.refresh_twofactor_remember()))
} else {
device.delete_twofactor_remember();
@@ -215,6 +222,13 @@ fn twofactor_auth(
}
}
fn _selected_data(tf: Option<TwoFactor>) -> ApiResult<String> {
match tf {
Some(tf) => Ok(tf.data),
None => err!("Two factor doesn't exist"),
}
}
fn _json_err_twofactor(providers: &[i32], user_uuid: &str, conn: &DbConn) -> ApiResult<Value> {
use crate::api::core::two_factor;
@@ -232,26 +246,37 @@ fn _json_err_twofactor(providers: &[i32], user_uuid: &str, conn: &DbConn) -> Api
Some(TwoFactorType::Authenticator) => { /* Nothing to do for TOTP */ }
Some(TwoFactorType::U2f) if CONFIG.domain_set() => {
let request = two_factor::generate_u2f_login(user_uuid, conn)?;
let request = two_factor::u2f::generate_u2f_login(user_uuid, conn)?;
let mut challenge_list = Vec::new();
for key in request.registered_keys {
let mut challenge_map = JsonMap::new();
challenge_map.insert("appId".into(), Value::String(request.app_id.clone()));
challenge_map.insert("challenge".into(), Value::String(request.challenge.clone()));
challenge_map.insert("version".into(), Value::String(key.version));
challenge_map.insert("keyHandle".into(), Value::String(key.key_handle.unwrap_or_default()));
challenge_list.push(Value::Object(challenge_map));
challenge_list.push(json!({
"appId": request.app_id,
"challenge": request.challenge,
"version": key.version,
"keyHandle": key.key_handle,
}));
}
let mut map = JsonMap::new();
use serde_json;
let challenge_list_str = serde_json::to_string(&challenge_list).unwrap();
map.insert("Challenges".into(), Value::String(challenge_list_str));
result["TwoFactorProviders2"][provider.to_string()] = Value::Object(map);
result["TwoFactorProviders2"][provider.to_string()] = json!({
"Challenges": challenge_list_str,
});
}
Some(TwoFactorType::Duo) => {
let email = match User::find_by_uuid(user_uuid, &conn) {
Some(u) => u.email,
None => err!("User does not exist"),
};
let (signature, host) = duo::generate_duo_signature(&email, conn)?;
result["TwoFactorProviders2"][provider.to_string()] = json!({
"Host": host,
"Signature": signature,
});
}
Some(tf_type @ TwoFactorType::YubiKey) => {
@@ -260,12 +285,30 @@ fn _json_err_twofactor(providers: &[i32], user_uuid: &str, conn: &DbConn) -> Api
None => err!("No YubiKey devices registered"),
};
let yubikey_metadata: two_factor::YubikeyMetadata =
serde_json::from_str(&twofactor.data).expect("Can't parse Yubikey Metadata");
let yubikey_metadata: yubikey::YubikeyMetadata = serde_json::from_str(&twofactor.data)?;
let mut map = JsonMap::new();
map.insert("Nfc".into(), Value::Bool(yubikey_metadata.Nfc));
result["TwoFactorProviders2"][provider.to_string()] = Value::Object(map);
result["TwoFactorProviders2"][provider.to_string()] = json!({
"Nfc": yubikey_metadata.Nfc,
})
}
Some(tf_type @ TwoFactorType::Email) => {
use crate::api::core::two_factor as _tf;
let twofactor = match TwoFactor::find_by_user_and_type(user_uuid, tf_type as i32, &conn) {
Some(tf) => tf,
None => err!("No twofactor email registered"),
};
// Send email immediately if email is the only 2FA option
if providers.len() == 1 {
_tf::email::send_token(&user_uuid, &conn)?
}
let email_data = EmailTokenData::from_json(&twofactor.data)?;
result["TwoFactorProviders2"][provider.to_string()] = json!({
"Email": email::obscure_email(&email_data.email),
})
}
_ => {}

View File

@@ -47,10 +47,13 @@ impl NumberOrString {
}
}
fn into_i32(self) -> Option<i32> {
fn into_i32(self) -> ApiResult<i32> {
use std::num::ParseIntError as PIE;
match self {
NumberOrString::Number(n) => Some(n),
NumberOrString::String(s) => s.parse().ok(),
NumberOrString::Number(n) => Ok(n),
NumberOrString::String(s) => s
.parse()
.map_err(|e: PIE| crate::Error::new("Can't convert to number", e.to_string())),
}
}
}
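// Editor's note: switching into_i32 from Option<i32> to ApiResult<i32> lets
// callers propagate a descriptive parse error with `?` (as activate_u2f does
// with data.Id.into_i32()?) instead of turning a bare None into an error at
// every call site.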

View File

@@ -88,7 +88,7 @@ fn serialize(val: Value) -> Vec<u8> {
fn serialize_date(date: NaiveDateTime) -> Value {
let seconds: i64 = date.timestamp();
let nanos: i64 = date.timestamp_subsec_nanos() as i64;
let nanos: i64 = date.timestamp_subsec_nanos().into();
let timestamp = nanos << 34 | seconds;
let bs = timestamp.to_be_bytes();
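// Editor's note: this matches MessagePack's timestamp-64 extension layout:
// the low 34 bits carry seconds and the high 30 bits carry nanoseconds
// (nanos << 34 | seconds), written out big-endian. Using .into() instead of
// `as i64` widens u32 -> i64 losslessly and avoids a silent-truncation cast.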
@@ -157,8 +157,6 @@ impl Handler for WSHandler {
}
fn on_message(&mut self, msg: Message) -> ws::Result<()> {
info!("Server got message '{}'. ", msg);
if let Message::Text(text) = msg.clone() {
let json = &text[..text.len() - 1]; // Remove last char
@@ -230,7 +228,7 @@ pub struct WebSocketUsers {
}
impl WebSocketUsers {
fn send_update(&self, user_uuid: &String, data: &[u8]) -> ws::Result<()> {
fn send_update(&self, user_uuid: &str, data: &[u8]) -> ws::Result<()> {
if let Some(user) = self.map.get(user_uuid) {
for sender in user.iter() {
sender.send(data)?;
@@ -249,7 +247,7 @@ impl WebSocketUsers {
ut,
);
self.send_update(&user.uuid.clone(), &data).ok();
self.send_update(&user.uuid, &data).ok();
}
pub fn send_folder_update(&self, ut: UpdateType, folder: &Folder) {

Some files were not shown because too many files have changed in this diff.