Compare commits

360 Commits

Author SHA1 Message Date
Daniel García
fd27759a95 Merge pull request #1546 from RealOrangeOne/clippy-run
Run Clippy
2021-03-28 16:04:09 +02:00
Daniel García
01d8056c73 Merge pull request #1545 from RealOrangeOne/icon-client-cache
Client caching
2021-03-28 16:03:16 +02:00
Jake Howard
81fa33ebb5 Remove unnecessary reference 2021-03-28 10:59:49 +01:00
Jake Howard
e8aa3bc066 Merge branch 'master' into clippy-run 2021-03-28 10:51:25 +01:00
Jake Howard
0bf0125e82 Reverse negation on ordering
Co-authored-by: Daniel García <dani-garcia@users.noreply.github.com>
2021-03-28 10:49:29 +01:00
Jake Howard
6209e778e5 Icons should always be cached using full TTL 2021-03-28 10:39:12 +01:00
Daniel García
5323283f98 Merge pull request #1548 from BlackDex/admin-interface
Updated diagnostics page
2021-03-28 01:31:38 +01:00
BlackDex
57e17d0648 Updated diagnostics page
- Added reverse proxy check
- Better definition of internet proxy
- Added SQL Server version detection
2021-03-28 00:10:01 +01:00
Jake Howard
da55d5ec70 Also run actions CI on pull request
`push` only counts for pushes to branches on the repo, not forks
2021-03-27 15:21:00 +00:00
Jake Howard
828a060698 Run clippy on CI 2021-03-27 15:10:00 +00:00
Jake Howard
3e5971b9db Remove unnecessary result return types 2021-03-27 15:07:26 +00:00
Jake Howard
47c2625d38 Prevent clippy complaining at method
It's not incorrectly wrapped. We care about the return type being `Option`.
2021-03-27 14:36:50 +00:00
Jake Howard
49af9cf4f5 Correctly camelCase acronyms
https://rust-lang.github.io/rust-clippy/master/index.html#upper_case_acronyms
2021-03-27 14:26:32 +00:00
Jake Howard
6b1daeba05 Implement From over Into
https://rust-lang.github.io/rust-clippy/master/index.html#from_over_into
2021-03-27 14:19:57 +00:00
Jake Howard
9f1240d8d9 Only construct JSON object if it's useful 2021-03-27 14:03:46 +00:00
Jake Howard
a8138be69b Use if let more 2021-03-27 14:03:31 +00:00
Jake Howard
ea57dc3bc9 Use matches macro 2021-03-27 14:03:07 +00:00
Jake Howard
131348a49f Add immutable caching for vault assets
The URLs are cachebusted, so updates will still be applied cleanly and immediately
2021-03-27 13:37:56 +00:00
Jake Howard
b22564cb00 Cache icons on the client
This should make the vault pages load much faster, and massively reduce the number of requests.
2021-03-27 13:30:40 +00:00
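The two caching commits above boil down to sending different Cache-Control headers for cache-busted vault assets versus downloaded icons. A minimal sketch of the idea, with an illustrative helper rather than the project's actual responder code:

```rust
// Hypothetical helper: build a Cache-Control value for a response.
// Cache-busted vault assets can be marked `immutable`, while icons are
// simply cached on the client for their full TTL.
fn cache_header(ttl_secs: u64, immutable: bool) -> String {
    if immutable {
        format!("public, max-age={}, immutable", ttl_secs)
    } else {
        format!("public, max-age={}", ttl_secs)
    }
}

fn main() {
    println!("{}", cache_header(31_536_000, true)); // vault assets
    println!("{}", cache_header(2_592_000, false)); // icons, 30-day TTL
}
```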
Daniel García
16eb0a56f9 Exclude vendored scripts from Github language statistics 2021-03-25 21:39:34 +01:00
Daniel García
3e4ff47a38 Update dependencies, particularly openssl to 1.1.1k 2021-03-25 20:05:20 +01:00
Daniel García
8ea01a67f6 Merge pull request #1529 from mprasil/more-generic-send-error-messages
Return generic message when Send not available
2021-03-25 19:56:24 +01:00
Miro Prasil
aa5cc642e1 Use constant for the "inaccessible" error message 2021-03-25 11:40:32 +00:00
Daniel García
a121cb6f00 Merge pull request #1530 from jjlin/global-domains
Sync global_domains.json
2021-03-23 23:48:20 +01:00
Daniel García
60164182ae Fix alpine armv7 build
Reference: https://github.com/messense/rust-musl-cross/pull/34
2021-03-23 23:47:12 +01:00
Jeremy Lin
f842a80cdb Sync global_domains.json to bitwarden/server@455e4b2 (ProtonMail/ProtonVPN) 2021-03-23 11:30:00 -07:00
Miro Prasil
4b6a574ee0 Return generic message when Send not available
This should help avoid leaking information about (non)existence of Send
and be more in line with what official server returns.
2021-03-23 13:39:09 +00:00
Daniel García
f9ebb780f9 Update dependencies 2021-03-22 20:00:57 +01:00
Daniel García
1fc6c30652 Send deletion thread and updated users revision 2021-03-22 19:57:35 +01:00
Daniel García
46a1a013cd Update user revision date with sends 2021-03-22 19:05:15 +01:00
Daniel García
551810c486 Fix updating file send 2021-03-17 19:39:48 +01:00
Daniel García
b987ba506d Merge pull request #1493 from jjlin/send
Add support for the Disable Send policy
2021-03-16 18:13:55 +01:00
Daniel García
84810f2bb2 Remove unnecessary fields from send access 2021-03-16 18:11:25 +01:00
Jeremy Lin
424d666a50 Add support for the Disable Send policy
Upstream refs:

* https://github.com/bitwarden/server/pull/1130
* https://bitwarden.com/help/article/policies/#disable-send
2021-03-16 02:07:45 -07:00
Daniel García
a71359f647 Merge pull request #1469 from jjlin/cors
CORS fixes
2021-03-15 16:57:00 +01:00
Daniel García
d93c344176 Merge branch 'master' into cors 2021-03-15 16:49:12 +01:00
Daniel García
b9c3213b90 Merge pull request #1487 from jjlin/send
Send access check fixes
2021-03-15 16:47:14 +01:00
Daniel García
95e24ffc51 rename send key -> akey 2021-03-15 16:42:20 +01:00
Jeremy Lin
00d56d7295 Send access check fixes
Adjust checks for max access count, expiration date, and deletion date.
The date checks aren't that important, but the access count check
currently allows one more access than it should.
2021-03-14 23:20:49 -07:00
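A rough sketch of the kind of checks this commit adjusts, with illustrative field names (not the project's actual implementation); the key point is that an access-count limit of N should allow exactly N accesses, not N + 1:

```rust
use chrono::{NaiveDateTime, Utc};

// Illustrative access check for a Send.
fn send_accessible(
    access_count: i32,
    max_access_count: Option<i32>,
    expiration_date: Option<NaiveDateTime>,
    deletion_date: NaiveDateTime,
) -> bool {
    let now = Utc::now().naive_utc();
    // `>=` (not `>`) so that a limit of N allows exactly N accesses.
    if let Some(max) = max_access_count {
        if access_count >= max {
            return false;
        }
    }
    // Expired or already past its deletion date: not accessible.
    if expiration_date.map_or(false, |exp| exp <= now) {
        return false;
    }
    deletion_date > now
}
```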
Daniel García
7436b454db Update web vault to 2.19.0 2021-03-14 23:36:49 +01:00
Daniel García
8da5b99482 Send API 2021-03-14 23:35:55 +01:00
Daniel García
2969e87b52 Add separate host-only fromrequest handler 2021-03-14 23:24:47 +01:00
Daniel García
ce62e898c3 Remove debug impl from database structs
This is only implemented for the database specific structs, which is not what we want
2021-03-13 22:04:04 +01:00
Daniel García
431462d839 Update dependencies and enable serde integration for chrono 2021-03-13 22:02:11 +01:00
Jeremy Lin
7d0e234b34 CORS fixes
* The Safari extension apparently now uses the origin `file://` and expects
  that to be returned (see bitwarden/browser#1311, bitwarden/server#800).

* The `Access-Control-Allow-Origin` header was reflecting the value of the
  `Origin` header without checking whether the origin was actually allowed.
  This effectively allows any origin to interact with the server, which
  defeats the purpose of CORS.
2021-03-07 00:35:08 -08:00
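A minimal sketch of the fix described in the second bullet, assuming a plain helper function rather than the project's actual CORS handling: the `Origin` header is only reflected back when it is actually allowed.

```rust
// Illustrative helper: decide what to put in Access-Control-Allow-Origin.
fn allow_origin_header(origin: &str, allowed_origins: &[&str]) -> Option<String> {
    // The Safari extension sends `file://` and expects it to be reflected.
    if origin == "file://" || allowed_origins.contains(&origin) {
        Some(origin.to_string())
    } else {
        // None means the header is omitted and the browser blocks the request.
        None
    }
}

fn main() {
    let allowed = ["https://vault.example.com"];
    assert_eq!(
        allow_origin_header("https://vault.example.com", &allowed).as_deref(),
        Some("https://vault.example.com")
    );
    assert_eq!(allow_origin_header("https://evil.example", &allowed), None);
}
```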
Daniel García
dad1b1bee9 Updated dependencies 2021-03-06 22:04:01 +01:00
Daniel García
9312cebee3 Merge pull request #1463 from std2main/patch-2
Add a dot in find command.
2021-03-06 00:04:12 +01:00
std2main
cdf5b6ec2d Add a dot in find command.
Add a dot indicating the current directory for find to search.

find on macOS won't work without the dot
2021-03-05 15:49:45 -05:00
Mathijs van Veluw
ce99fc8f95 Merge pull request #1460 from jjlin/invitation-org-name
Fix custom org name in invitation confirmation email
2021-03-04 08:15:34 +01:00
Jeremy Lin
a75d050001 Fix custom org name in invitation confirmation email
The org name in the invitation email was made customizable in 8867626, but
the org name is still hardcoded as "bitwarden_rs" in the confirmation email.
2021-03-03 23:03:55 -08:00
Daniel García
75cfd10f11 Merge pull request #1444 from jjlin/remove-md5
Remove `md5.js` dependency
2021-02-28 18:23:27 +01:00
Daniel García
9859ba6339 Merge pull request #1443 from jjlin/data-folder
Check for data folder on startup
2021-02-28 18:22:46 +01:00
Jeremy Lin
513056f711 Check for data folder on startup
Currently, when starting up for the first time (running standalone, outside
of Docker), bitwarden_rs panics when the `openssl` tool isn't able to create
`data/rsa_key.pem` due to the `data` dir not existing. Instead, print a more
helpful error message telling the user to create the directory.
2021-02-28 01:45:05 -08:00
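A minimal sketch of such a startup check, assuming a plain function and an illustrative message (not the project's exact wording):

```rust
use std::path::Path;
use std::process::exit;

// Illustrative startup check: bail out with a helpful message instead of
// letting a later step panic because the data dir doesn't exist.
fn check_data_folder(data_folder: &str) {
    if !Path::new(data_folder).is_dir() {
        eprintln!(
            "Data folder '{}' doesn't exist or isn't a directory; please create it and restart.",
            data_folder
        );
        exit(1);
    }
}

fn main() {
    check_data_folder("data");
}
```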
Mathijs van Veluw
ebe334fcc7 Merge pull request #1447 from jjlin/issue-templates
Allow only bug report issues
2021-02-28 08:32:04 +01:00
Jeremy Lin
0eec12472e Allow only bug report issues
Remove templates for other issue types, directing them to the forum instead.
2021-02-27 22:13:51 -08:00
Jeremy Lin
39106d440a Remove md5.js dependency
Switch to the built-in WebCrypto APIs for computing identicon hashes.
2021-02-26 21:48:01 -08:00
Daniel García
9117095764 Update dependencies and web vault 2021-02-24 20:30:19 +01:00
Daniel García
099bba950c Merge pull request #1432 from jjlin/2fa
Change `twofactorauth.org` to `2fa.directory`
2021-02-24 20:05:57 +01:00
Jeremy Lin
e37ff60617 Change twofactorauth.org to 2fa.directory
The `twofactorauth.org` domain has apparently been sold to some company for
marketing purposes.
2021-02-23 18:51:07 -08:00
Daniel García
5b14608041 Update web vault to have better error messages when not using HTTPS 2021-02-20 19:13:20 +01:00
Daniel García
ad92692bab Merge pull request #1413 from paolobarbolini/email-clones
Remove unnecessary allocations
2021-02-20 17:58:12 +01:00
Paolo Barbolini
d956d42903 Remove unnecessary allocations 2021-02-19 20:17:18 +01:00
Daniel García
d69be7d03a Merge pull request #1389 from jjlin/alpine
Update Alpine base images to 3.13
2021-02-15 20:58:13 +01:00
Jeremy Lin
f82de8d00d Update Alpine base images to 3.13 2021-02-14 15:18:47 -08:00
Daniel García
c836f88ff2 Remove soup and use a newer html5ever directly 2021-02-07 22:28:02 +01:00
Daniel García
8b660ae090 Swap structopt for a simpler alternative 2021-02-07 20:10:40 +01:00
Daniel García
9323c57f49 Remove debug print 2021-02-07 00:22:39 +01:00
Daniel García
85e3c73525 Basic experimental ldap import support with the official directory connector 2021-02-06 20:15:42 +01:00
Daniel García
a74bc2e58f Update web vault to 2.18.1b 2021-02-06 16:49:49 +01:00
Daniel García
0680638933 Update dependencies 2021-02-06 16:49:28 +01:00
Daniel García
46d31ee5f7 Merge pull request #1356 from BlackDex/fix-config-bug
Fixed small bug in validation
2021-02-03 23:50:49 +01:00
BlackDex
e794b397d3 Fixed small bug in validation 2021-02-03 23:47:48 +01:00
Daniel García
d41350050b Merge pull request #1353 from BlackDex/admin-interface
Extra features for admin interface.
2021-02-03 22:50:15 +01:00
Mathijs van Veluw
4cd5b06b7f Merge branch 'master' into admin-interface 2021-02-03 22:41:59 +01:00
Daniel García
cd768439d2 Merge pull request #1329 from BlackDex/misc-updates
JSON Response updates and small fixes
2021-02-03 22:37:59 +01:00
Mathijs van Veluw
9e5fd2d576 Merge branch 'master' into admin-interface 2021-02-03 22:22:33 +01:00
Mathijs van Veluw
ecb46f591c Merge branch 'master' into misc-updates 2021-02-03 22:22:06 +01:00
Daniel García
d62d53aa8e Merge pull request #1341 from BlackDex/dep-update
Updated dependencies and small mail fixes
2021-02-03 22:19:18 +01:00
Daniel García
2c515ab13c Merge pull request #1355 from jjlin/global-domains
Sync global_domains.json with upstream
2021-02-03 22:17:57 +01:00
Jeremy Lin
83d556ff0c Sync global_domains.json to bitwarden/server@cf84453 (Disney, Sony) 2021-02-03 12:22:03 -08:00
Jeremy Lin
678d313836 global_domains.py: allow syncing to a specific Git ref 2021-02-03 12:20:44 -08:00
BlackDex
705d840ea3 Extra features for admin interface.
- Able to modify the user type per organization
- Able to remove a whole organization
- Added podman detection
- Only show web-vault update when not running a containerized
  bitwarden_rs

Solves #936
2021-02-03 18:43:54 +01:00
BlackDex
7dff8c01dd JSON Response updates and small fixes
Updated several json response models.
Also fixed a few small bugs.

ciphers.rs:
  - post_ciphers_create:
    * Prevent cipher creation to organization without a collection.
  - update_cipher_from_data:
    * ~~Fixed removal of user_uuid which prevent user-owned shared-cipher to be not editable anymore when set to read-only.~~
    * Cleanup the json_data by removing the `Response` key/values from several objects.
  - delete_all:
    * Do not delete all Collections during the Purge of an Organization (same as upstream).

cipher.rs:
  - Cipher::to_json:
    * Updated json response to match upstream.
    * Return empty json object if there is no type_data instead of values which should not be set for the type_data.

organizations.rs:
  * Added two new endpoints to prevent Javascript errors regarding tax

organization.rs:
  - Organization::to_json:
    * Updated response model to match upstream
  - UserOrganization::to_json:
    * Updated response model to match upstream

collection.rs:
  - Collection::{to_json, to_json_details}:
    * Updated the json response model, and added a detailed version used during the sync
  - hide_passwords_for_user:
    * Added this function to return whether the passwords should be hidden for the user in the specific collection (used by `to_json_details`)

Update 1: Some small changes after comments from @jjlin.
Update 2: Fixed vault purge by user to make sure the cipher is not part of an organization.

Resolves #971
Closes #990, Closes #991
2021-01-31 21:46:37 +01:00
BlackDex
5860679624 Updated dependencies and small mail fixes
- Updated rust nightly
- Updated dependencies
- Removed unicode support for regex (fewer dependencies)
- Fixed dependency and nightly changes/deprecations
- Some mail changes for less spam point triggering
2021-01-31 20:07:42 +01:00
Daniel García
4628e4519d Update web vault to 2.18.1 2021-01-27 16:08:11 +01:00
Mathijs van Veluw
b884fd20a1 Merge pull request #1333 from jjlin/fix-manager-access
Fix collection access issues for owner/admin users
2021-01-27 08:07:20 +01:00
Jeremy Lin
67c657003d Fix collection access issues for owner/admin users
The implementation of the `Manager` user type (#1242) introduced a regression
whereby owner/admin users are incorrectly denied access to certain collection
APIs if their access control for collections isn't set to "access all".

Owner/admin users should always have full access to collection APIs, per
https://bitwarden.com/help/article/user-types-access-control/#access-control:

> Assigning Admins and Owners to Collections via Access Control will only
> impact which Collections appear readily in the Filters section of their
> Vault. Admins and Owners will always be able to access "un-assigned"
> Collections via the Organization view.
2021-01-26 22:35:09 -08:00
Daniel García
580c1bbc7d Update web vault to 2.18.0 2021-01-25 12:27:57 +01:00
Daniel García
2b6383d243 Merge pull request #1327 from jjlin/dockerfile-cleanup
Dockerfile.j2: clean up web-vault section
2021-01-25 12:24:04 +01:00
Daniel García
f27455a26f Merge pull request #1328 from jjlin/restore-rev-date
Add cipher response to restore operations
2021-01-25 12:23:00 +01:00
Jeremy Lin
1d4f900e48 Add cipher response to restore operations
This matches changes in the upstream Bitwarden server and clients.

Upstream PR: https://github.com/bitwarden/server/pull/1072
2021-01-24 21:57:32 -08:00
Jeremy Lin
c5ca588a6f Dockerfile.j2: clean up web-vault section 2021-01-24 17:26:25 -08:00
Daniel García
06888251e3 Merge pull request #1326 from jjlin/personal-ownership
Add support for the Personal Ownership policy
2021-01-24 14:09:12 +01:00
Daniel García
1a6e4cf4e4 Merge pull request #1321 from mkilchhofer/feature/improve_shutdown_behavior
Improve shutdown behavior (on kubernetes and allow CTRL+C)
2021-01-24 14:06:15 +01:00
Jeremy Lin
9f86196a9d Add support for the Personal Ownership policy
Upstream refs:

* https://github.com/bitwarden/server/pull/1013
* https://bitwarden.com/help/article/policies/#personal-ownership
2021-01-23 20:50:06 -08:00
Marco Kilchhofer
1e31043fb3 Improve shutdown behavior (on kubernetes) 2021-01-22 11:50:24 +01:00
Daniel García
85adcf1ae5 Merge pull request #1316 from BlackDex/admin-interface
Updated the admin interface
2021-01-19 21:58:21 +01:00
Daniel García
9abb4d2873 Merge pull request #1314 from jjlin/image-labels
Add `org.opencontainers` labels to Docker images
2021-01-19 21:53:27 +01:00
BlackDex
235ff44736 Updated the admin interface
Mostly updated the admin interface, also some small other items.

- Added more diagnostic information to (hopefully) decrease issue
  reporting, or at least solve them quicker.
- Added an option to generate a support string which can be copy/pasted
  on the forum or when creating an issue. It will try to hide the
  sensitive information automatically.
- Changed the `Created At` and `Last Active` info into sortable columns
  in the users overview.
- Some small layout changes.
- Updated javascript and css files to the latest versions available.
- Decreased the png file sizes using `oxipng`
- Updated target='_blank' links to have rel='noreferrer' to prevent
  javascript window.opener modifications.
2021-01-19 17:55:21 +01:00
Jeremy Lin
9c2d741749 Add org.opencontainers labels to Docker images 2021-01-18 01:10:41 -08:00
Daniel García
37cc0c34cf Merge pull request #1304 from jjlin/buildx
Use Docker Buildx for multi-arch builds
2021-01-12 21:51:33 +01:00
Jeremy Lin
5633b6ac94 Use Docker Buildx for multi-arch builds
The bitwarden_rs code is still cross-compiled exactly as before, but Docker
Buildx is used to rewrite the resulting Docker images with correct platform
metadata (reflecting the target platform instead of the build platform).
Buildx also now handles building and pushing the multi-arch manifest lists.
2021-01-09 02:33:36 -08:00
Daniel García
175f2aeace Merge pull request #1270 from BlackDex/update-ci
Updated Github Actions, Fixed Dockerfile
2020-12-17 18:22:46 +01:00
BlackDex
feefe69094 Updated Github Actions, Fixed Dockerfile
- Updated the Github Actions to build just one binary with all DB
  Backends.

- Created a hadolint workflow to check and verify Dockerfiles.
- Fixed current hadolint errors.
- Fixed a bug in the Dockerfile.j2 which prevented the correct libraries
  and tools from being installed on the Alpine images.

- Deleted travis.yml since that is not used anymore
2020-12-16 19:31:39 +01:00
Daniel García
46df3ee7cd Updated insecure ws dependency and general dep updates 2020-12-15 22:23:12 +01:00
Daniel García
bb945ad01b Merge pull request #1243 from BlackDex/fix-key-rotate
Fix Key Rotation during password change.
2020-12-14 20:56:31 +01:00
BlackDex
de86aa671e Fix Key Rotation during password change
When ticking the 'Also rotate my account's encryption key' box, the key
rotated ciphers are posted after the change of password.

During the password change the security stamp was reset, which made
the posted keys return an invalid auth. This reset is needed to prevent other clients from still being able to read/write.

This is fixed by adding a new database column which stores a stamp exception, containing the allowed route and the current security stamp before it gets reset.
When the security stamp check fails, it checks whether there is a stamp exception and tries to match the route and security stamp.

Currently it only allows for one exception, but if needed this could be expanded by using a Vec<UserStampException> and changing the functions accordingly.

fixes #1240
2020-12-14 19:58:23 +01:00
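A rough sketch of the stamp-exception idea described above, with illustrative struct and field names (the real column layout and auth flow differ):

```rust
// One stored exception: the previous security stamp stays valid, but only
// for a single allowed route (e.g. the endpoint that posts rotated keys).
pub struct UserStampException {
    pub route: String,
    pub security_stamp: String,
}

fn stamp_is_valid(
    token_stamp: &str,
    current_stamp: &str,
    exception: Option<&UserStampException>,
    requested_route: &str,
) -> bool {
    if token_stamp == current_stamp {
        return true;
    }
    // Fall back to the single stored exception: both route and old stamp must match.
    matches!(
        exception,
        Some(e) if e.route == requested_route && e.security_stamp == token_stamp
    )
}
```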
Daniel García
e38771bbbd Merge pull request #1267 from jjlin/datetime-cleanup
Clean up datetime output and code
2020-12-14 18:36:39 +01:00
Daniel García
a3f9a8d7dc Merge pull request #1265 from jjlin/cipher-rev-date
Fix stale data check failure when cloning a cipher
2020-12-14 18:35:17 +01:00
Daniel García
4b6bc6ef66 Merge pull request #1266 from BlackDex/icon-user-agent
Small update on favicon downloading
2020-12-14 18:34:07 +01:00
Jeremy Lin
455a23361f Clean up datetime output and code
* For clarity, add `UTC` suffix for datetimes in the `Diagnostics` admin tab.
* Format datetimes in the local timezone in the `Users` admin tab.
* Refactor some datetime code and add doc comments.
2020-12-13 19:49:22 -08:00
BlackDex
1a8ec04733 Small update on favicon downloading
- Changed the user-agent, which caused at least one site to stall the
  connection (Same happens on icons.bitwarden.com)
- Added default_header creation to the lazy static CLIENT
- Added referer passing, which is checked by some sites
- Some small other changes
2020-12-10 23:13:24 +01:00
Jeremy Lin
4e60df7a08 Fix stale data check failure when cloning a cipher 2020-12-10 00:17:34 -08:00
Daniel García
219a9d9f5e Merge pull request #1262 from BlackDex/icon-fixes
Updated icon downloading.
2020-12-08 18:05:05 +01:00
BlackDex
48baf723a4 Updated icon downloading
- Added more checks to prevent panics (Removed unwrap)
- Try to download from the base domain, or add www, when the provided
  domain fails
- Added some more domain validation checks to prevent errors
- Added the ICON_BLACKLIST_REGEX to a lazy static HashMap, which
  speeds up the checks!
- Validate the regex on startup and on config change.
- Some cleanups
- Disabled some noisy debugging from 2 crates.
2020-12-08 17:34:18 +01:00
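A sketch of the lazy-regex-cache idea mentioned above, using `once_cell` as an assumption (the project may use `lazy_static` and key the map differently):

```rust
use once_cell::sync::Lazy;
use regex::Regex;
use std::collections::HashMap;
use std::sync::Mutex;

// Compile each blacklist pattern once and reuse it; recompiling a Regex for
// every icon request is exactly what the caching avoids.
static BLACKLIST_CACHE: Lazy<Mutex<HashMap<String, Regex>>> =
    Lazy::new(|| Mutex::new(HashMap::new()));

fn is_blacklisted(domain: &str, pattern: &str) -> bool {
    let mut cache = BLACKLIST_CACHE.lock().unwrap();
    let regex = cache
        .entry(pattern.to_string())
        .or_insert_with(|| Regex::new(pattern).expect("invalid ICON_BLACKLIST_REGEX"));
    regex.is_match(domain)
}

fn main() {
    assert!(is_blacklisted("internal.example.com", r"^internal\."));
}
```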
Daniel García
6530904883 Update web vault version to 2.17.1 2020-12-08 16:43:19 +01:00
Daniel García
d15d24f4ff Merge pull request #1242 from BlackDex/allow-manager-role
Adding Manager Role support
2020-12-08 16:11:55 +01:00
Daniel García
8d992d637e Merge pull request #1257 from jjlin/cipher-rev-date
Validate cipher updates with revision date
2020-12-08 15:59:21 +01:00
Daniel García
6ebc83c3b7 Merge pull request #1247 from janost/admin-disable-user
Implement admin ability to enable/disable users
2020-12-08 15:43:56 +01:00
Daniel García
b32f4451ee Merge branch 'master' into admin-disable-user 2020-12-08 15:42:37 +01:00
Daniel García
99142c7552 Merge pull request #1252 from BlackDex/update-dependencies-20201203
Updated dependencies and Dockerfiles
2020-12-08 15:33:41 +01:00
Daniel García
db710bb931 Merge pull request #1245 from janost/user-last-login
Show last active date on admin users page
2020-12-08 15:31:25 +01:00
Jeremy Lin
a9e9a397d8 Validate cipher updates with revision date
Prevent clients from updating a cipher if the local copy is stale.
Validation is only performed when the client provides its last known
revision date; this date isn't provided when using older clients,
or when the operation doesn't involve updating an existing cipher.

Upstream PR: https://github.com/bitwarden/server/pull/994
2020-12-07 19:34:00 -08:00
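A minimal sketch of the validation described above, with illustrative names; older clients omit the date, in which case the check is skipped:

```rust
use chrono::NaiveDateTime;

// Illustrative check: reject an update when the client's last known revision
// date is older than the revision date stored on the server.
fn check_revision(
    server_revision: NaiveDateTime,
    client_last_known_revision: Option<NaiveDateTime>,
) -> Result<(), &'static str> {
    match client_last_known_revision {
        // Older clients don't send the date, so skip validation.
        None => Ok(()),
        Some(rev) if rev >= server_revision => Ok(()),
        Some(_) => Err("The client copy of this cipher is out of date; sync and try again"),
    }
}
```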
BlackDex
d46a6ac687 Updated dependencies and Dockerfiles
- Updated crates
- Updated rust-toolchain
- Updated Dockerfile to use latest rust 1.48 version
- Updated AMD64 Alpine to use same version as rust-toolchain and support
  PostgreSQL.
- Updated Rocket to the commit right before they updated hyper.
  Up to that commit there were some crate updates and small fixes.
  After it the build fails and we would probably need to make some changes
  (which is probably something already done in the async branch).
2020-12-04 13:38:42 +01:00
janost
1eb5495802 Show latest active device as last active on admin page 2020-12-03 17:07:32 +01:00
BlackDex
7cf8809d77 Adding Manager Role support
This has been requested a few times (#1136 & #246 & forum), and there were already two
(1:1 duplicate) PRs (#1222 & #1223) which needed some changes but unfortunately got no
follow-ups or further comments.

This PR adds two auth headers.
- ManagerHeaders
  Checks if the user-type is Manager or higher and if the manager is
part of that collection or not.
- ManagerHeadersLoose
  Checks if the user-type is Manager or higher, but does not check if the
  user is part of the collection; needed for a few features like
  retrieving all the users of an org.

I think this is the safest way to implement this instead of having to
check this within every function which needs this manually.

Also some extra checks if a manager has access to all collections or
just a selection.

fixes #1136
2020-12-02 22:50:51 +01:00
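A simplified sketch of the two checks behind these headers, with illustrative types (the project's real `UserOrgType` uses custom ordering, and the checks live in request guards):

```rust
// Declaration order gives User < Manager < Admin < Owner for the derived ordering.
#[derive(PartialEq, PartialOrd, Clone, Copy)]
enum UserOrgType {
    User,
    Manager,
    Admin,
    Owner,
}

// Strict variant (ManagerHeaders): Manager or higher *and* assigned to the collection.
fn can_manage_collection(org_type: UserOrgType, in_collection: bool) -> bool {
    org_type >= UserOrgType::Manager && in_collection
}

// Loose variant (ManagerHeadersLoose): Manager or higher, membership not required.
fn can_manage_org(org_type: UserOrgType) -> bool {
    org_type >= UserOrgType::Manager
}

fn main() {
    assert!(!can_manage_collection(UserOrgType::Manager, false));
    assert!(can_manage_org(UserOrgType::Admin));
}
```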
janost
043aa27aa3 Implement admin ability to enable/disable users 2020-11-30 23:12:56 +01:00
Daniel García
9824d94a1c Merge pull request #1244 from janost/read-config-from-files
Read config vars from files
2020-11-29 15:28:13 +01:00
janost
e8ef76b8f9 Read config vars from files 2020-11-29 02:31:49 +01:00
Daniel García
be1ddb4203 Merge pull request #1234 from janost/fix-failed-auth-log
Log proper namespace in the err!() macro
2020-11-27 18:49:46 +01:00
janost
caddf21fca Log proper namespace in the err!() macro 2020-11-22 00:09:45 +01:00
Daniel García
5379329ef7 Merge pull request #1229 from BlackDex/email-fixes
Email fixes
2020-11-18 16:16:27 +01:00
BlackDex
6faaeaae66 Updated email processing.
- Added an option to enable smtp debugging via SMTP_DEBUG. This will
  trigger a trace of the smtp commands sent/received to/from the mail
server. Useful when troubleshooting.
- Added two options to ignore invalid certificates which either do not
  match at all, or only don't match the hostname.
- Updated lettre to the latest alpha.4 version.
2020-11-18 12:07:08 +01:00
BlackDex
3fed323385 Fixed plain/text email format
text/plain emails should not contain html elements like <p> <a> etc.
This triggers some spam filters and increases the spam score.

Also added the GitHub link to the text-only emails, since a url/link count
that differs between the multipart parts also triggers spam filters and
increases the score.
2020-11-18 12:04:16 +01:00
BlackDex
58a928547d Updated admin settings page.
- Added check if settings are changed but not saved when sending test
  email.
- Added some styling to emphasize some risky settings.
- Fixed alignment of elements when the label has multiple lines.
2020-11-18 12:00:25 +01:00
Daniel García
558410c5bd Merge pull request #1220 from jameshurst/master
Return 404 instead of fallback icon
2020-11-14 14:17:53 +01:00
Daniel García
0dc0decaa7 Merge pull request #1212 from BlackDex/dotenv-warnings
Added error handling during dotenv loading
2020-11-14 14:11:56 +01:00
BlackDex
d11d663c5c Added error handling during dotenv loading
Some issues people report are caused by misconfiguration or bad .env
files. To mitigate this, error handling was added for loading the .env file.

- Panic/quit on a LineParse error, which indicates a bad .env file format.
- Emits an info message when no .env file is found.
- Emits a warning message when there is a .env file but no read
  permissions.
- Emits a warning for every other error not specifically caught.
2020-11-12 13:40:26 +01:00
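A sketch of what such handling can look like, assuming the dotenv 0.15 crate's `Error` enum (the variant names come from that crate; the messages are illustrative):

```rust
use std::io::ErrorKind;

fn load_env() {
    match dotenv::dotenv() {
        Ok(path) => println!("Loaded environment from {}", path.display()),
        // A parse error means the .env file itself is malformed: quit loudly.
        Err(dotenv::Error::LineParse(line, index)) => {
            panic!("Malformed .env file: error parsing '{}' (offset {})", line, index)
        }
        // Missing file is fine: fall back to real environment variables.
        Err(dotenv::Error::Io(ref err)) if err.kind() == ErrorKind::NotFound => {
            println!("No .env file found, using environment variables only")
        }
        // Anything else (e.g. permission problems) is only a warning.
        Err(err) => eprintln!("Warning: could not load .env file: {:?}", err),
    }
}

fn main() {
    load_env();
}
```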
James Hurst
771233176f Fix for negcached icons 2020-11-09 22:06:11 -05:00
James Hurst
ed70b07d81 Return 404 instead of fallback icon 2020-11-09 20:47:26 -05:00
Daniel García
e25fc7083d Merge pull request #1219 from aveao/master
Ensure that a user is actually in an org when applying policies
2020-11-07 23:29:12 +01:00
Ave
fa364c3f2c Ensure that a user is actually in an org when applying policies 2020-11-08 01:14:17 +03:00
Daniel García
b5f9fe4d3b Fix #1206 2020-11-07 23:03:02 +01:00
Daniel García
013d4c28b2 Try to fix #1218 2020-11-07 23:01:56 +01:00
Daniel García
63acc8619b Update dependencies 2020-11-07 23:01:04 +01:00
Daniel García
ec920b5756 Merge pull request #1199 from jjlin/delete-admin
Add missing admin endpoints for deleting ciphers
2020-10-23 14:18:41 +02:00
Jeremy Lin
95caaf2a40 Add missing admin endpoints for deleting ciphers
This fixes the inability to bulk-delete ciphers from org vault views.
2020-10-23 03:42:22 -07:00
Mathijs van Veluw
7099f8bee8 Merge pull request #1198 from fabianvansteen/patch-1
Correction of verify_email error message
2020-10-23 11:40:39 +02:00
Fabian van Steen
b41a0d840c Correction of verify_email error message 2020-10-23 10:30:25 +02:00
Daniel García
c577ade90e Updated dependencies 2020-10-15 23:44:35 +02:00
Daniel García
257b143df1 Remove some duplicate code in Dockerfile with the help of some variables 2020-10-11 17:27:15 +02:00
Daniel García
34ee326ce9 Merge pull request #1178 from BlackDex/update-azure-pipelines
Updated the azure-pipelines.yml for multidb
2020-10-11 17:25:15 +02:00
Daniel García
090104ce1b Merge pull request #1181 from BlackDex/update-issue-template
Updated bug-report to note to update first
2020-10-11 17:24:42 +02:00
BlackDex
3305d5dc92 Updated bug-report to note to update first 2020-10-11 15:58:31 +02:00
Daniel García
296063e135 Merge pull request #1108 from rfwatson/add-db-connections-setting
Add DATABASE_MAX_CONNS config setting
2020-10-10 00:25:03 +02:00
Rob Watson
b9daa59e5d Add DATABASE_MAX_CONNS config setting 2020-10-09 10:29:02 +02:00
BlackDex
5bdcfe128d Updated the azure-pipelines.yml for multidb
Updated the azure-pipelines.yml to build multidb now.
- Updated to Ubuntu 18.04 (Closer matches the docker builds)
- Added some really needed apt packages to be sure that they are
  installed
- Now run cargo test using all database backends in one go.
2020-10-08 18:48:05 +02:00
Daniel García
1842a796fb Merge pull request #1176 from BlackDex/fix-alpine-arm-docker
Fixed issue with building Alpine armv7 image.
2020-10-08 16:05:03 +02:00
BlackDex
ce99e5c583 Fixed issue with building Alpine armv7 image.
The runtime image was using a very old Alpine version.
This caused issues with the catatonit install

Now using the Balena armv7hf Alpine image for this.
2020-10-08 13:12:58 +02:00
Daniel García
0c96c2d305 Merge pull request #1171 from BlackDex/arm-multidb
Fixed building mysql, postgresql and sqlite3 for arm
2020-10-07 23:20:16 +02:00
Daniel García
5796b6b554 Merge pull request #1172 from BlackDex/missing-dotenv-config
Updated .env.template with missing config values.
2020-10-07 23:19:38 +02:00
BlackDex
c7ab27c86f Updated .env.template with missing config values.
Several values were missing from the .env.template file.
Updated this with all the values which were missing.
2020-10-06 18:54:21 +02:00
BlackDex
8c03746a67 Fixed building mysql, postgresql and sqlite3 for arm
With some apt/dpkg magic building multidb containers for arm versions
now also works. As long as the build stage and docker-image stage use
the same base (debian buster now) it should all work.

Resolves #530, resolves #1066
2020-10-06 18:04:53 +02:00
Daniel García
8746d36845 Document database connection retries and change alpine repo for catatonit
(cherry picked from commit 88e3835050c0418c060c8e3a704894763ee33aa0)
2020-10-04 14:14:26 +02:00
Daniel García
448e6ac917 Invalidate sessions when changing password or kdf values 2020-10-03 22:43:13 +02:00
Daniel García
729c9cff41 Retry initial db connection, with adjustable option 2020-10-03 22:32:00 +02:00
Daniel García
22b9c80007 Reorganize dockerfile template slightly (same result) 2020-10-03 20:59:48 +02:00
Daniel García
ab4355cfed Updated web vault, dependencies and base docker images 2020-10-03 20:50:13 +02:00
Daniel García
948dc82228 Updated sponsors 2020-10-03 20:48:02 +02:00
Daniel García
bc74fd23e7 Merge pull request #1151 from Start9Labs/feature/alpine-arm
alpine arm dockerfile
2020-10-03 20:47:42 +02:00
Daniel García
37776241be Merge pull request #1150 from BlackDex/mariadb-fk-issues
Fixed foreign-key (mariadb) errors.
2020-09-27 16:24:34 +02:00
Daniel García
feba41ec88 Merge pull request #1160 from eduardosm/vendored_openssl
Add `vendored_openssl` feature.
2020-09-27 16:24:11 +02:00
Aiden McClelland
6a8f42da8a specify version of cmosh's alpine-arm
Co-authored-by: Daniel García <dani-garcia@users.noreply.github.com>
2020-09-26 14:03:20 -06:00
Aiden McClelland
670d8cb83a add arm target to alpine container 2020-09-26 14:02:47 -06:00
Eduardo Sánchez Muñoz
2f7fbde789 Add vendored_openssl feature.
This feature enables the `vendored` feature from the `openssl` crate and build a statically linked version of openssl.
2020-09-25 23:25:53 +02:00
Mathijs van Veluw
c698bca2b9 Merge branch 'master' into mariadb-fk-issues 2020-09-25 22:25:57 +02:00
Daniel García
0b6a003a8b Merge pull request #1158 from BlackDex/fix-issue-1156
Add /api/accounts/verify-password endpoint
2020-09-25 21:14:25 +02:00
BlackDex
c64560016e Add /api/accounts/verify-password endpoint
If for some reason the hashed password is cleared from memory within a
bitwarden client it will try to verify the password at the server side.

This endpoint was missing.

Resolves #1156
2020-09-25 18:26:48 +02:00
BlackDex
978be0b4a9 Fixed foreign-key (mariadb) errors.
When using MariaDB v10.5+, foreign-key errors were popping up because of
some changes in that version. To mitigate this on MariaDB and other
MySQL forks, those errors are now caught, and an update happens instead of
a replace_into. I have tested this as thoroughly as possible with
MariaDB 10.5, 10.4, 10.3 and the default MySQL on Ubuntu Focal, and
tested it again using sqlite; all seems to be ok on all tables.

resolves #1081. resolves #1065, resolves #1050
2020-09-22 12:13:02 +02:00
Aiden McClelland
b58bff1178 alpine arm building successfully 2020-09-21 16:39:39 -06:00
Daniel García
2f3e18caa9 Merge pull request #1146 from BlackDex/user-orgs-table-enhancement
Enhanced user and orgs tables in admin view.
2020-09-20 16:48:19 +02:00
BlackDex
6a291040bd As requested here: https://bitwardenrs.discourse.group/t/searchable-user-list-on-admin-panel/299
- Changed the table layout a bit.
- Added functions to the tables:
  + Search
  + Sort
  + Paginate
2020-09-19 22:19:55 +02:00
Daniel García
dbc082dc75 Update web vault to 2.16.0 and dependencies 2020-09-19 22:01:14 +02:00
Daniel García
32a0dd09bf Merge pull request #1148 from BlackDex/update-config-descriptions
Updated the config options descriptions.
2020-09-19 21:38:01 +02:00
BlackDex
f847c6e225 Updated the config options descriptions.
Made some small changes to the description of the config options for
SMTP. Some were a bit cryptic and missing some extra descriptions.

Also made it clearer which type of secured SMTP connection is going
to be used.
2020-09-19 17:09:58 +02:00
Daniel García
99da5fbebb Merge pull request #1143 from BlackDex/better-lettre-errors
Format some common Lettre errors a bit simpler
2020-09-14 22:18:47 +02:00
BlackDex
6a0d024c69 Format some common Lettre errors a bit simpler
Currently, using the admin interface to send out a test e-mail, for example, just
returns `SmtpError`, which is not very helpful. What has been done:

- Match some common Lettre errors to return the error message.
- Other errors will just be passed on as before.

Some small other changes:
- Fixed a clippy warning about using clone().
- Fixed a typo where Lettere was spelled with one t.
2020-09-14 20:47:46 +02:00
Daniel García
b24929a243 Merge pull request #1141 from BlackDex/fix-org-creation
Fixed creating a new organization
2020-09-14 18:01:09 +02:00
BlackDex
9a47821642 Fixed creating a new organization
- The new web-vault needs a new api endpoint.
- Added this new endpoint.

Fixes #1139
2020-09-14 08:34:17 +02:00
Daniel García
d69968313b Merge pull request #1140 from jjlin/UserOrgType-cmp
Simplify implementation of `UserOrgType::cmp()`
2020-09-13 15:10:54 +02:00
Daniel García
3c377d97dc Merge pull request #1137 from BlackDex/smtp-multi-auth-mechanism
Allow multiple SMTP Auth mechanisms.
2020-09-13 15:09:58 +02:00
Daniel García
ea15218197 Merge pull request #1135 from BlackDex/update-mail
Updated lettre (and other crates) and workflow.
2020-09-13 15:08:01 +02:00
Jeremy Lin
0eee907c88 Simplify implementation of UserOrgType::cmp()
Also move `UserOrgType::from_str()` closer to the definition of `UserOrgType`
since it references specific enum values.
2020-09-13 02:03:16 -07:00
BlackDex
c877583979 Allow multiple SMTP Auth mechanisms.
- Allow all SMTP Auth mechanisms supported by Lettre.
- The order of the config value is leading, and values can be separated by a
  comma ','
- Case doesn't matter, and invalid values are ignored.
- Warning is printed when no valid value is found at all.
2020-09-12 21:47:24 +02:00
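A sketch of parsing such a comma-separated, case-insensitive config value (the mechanism names and warning text are illustrative; the real code maps onto Lettre's mechanism type):

```rust
// Illustrative parser: keep only recognised mechanisms, preserve the order
// given in the config value, ignore case and unknown entries.
fn parse_smtp_auth_mechanisms(raw: &str) -> Vec<String> {
    const KNOWN: [&str; 3] = ["PLAIN", "LOGIN", "XOAUTH2"];
    let selected: Vec<String> = raw
        .split(',')
        .map(|m| m.trim().to_uppercase())
        .filter(|m| KNOWN.contains(&m.as_str()))
        .collect();
    if selected.is_empty() {
        eprintln!("Warning: no valid SMTP auth mechanism found in '{}'", raw);
    }
    selected
}

fn main() {
    assert_eq!(parse_smtp_auth_mechanisms("login, Plain"), vec!["LOGIN", "PLAIN"]);
}
```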
BlackDex
844cf70345 Updated lettre (and other crates) and workflow.
General:
- Updated several dependencies

Lettre:
- Updated lettre and the workflow
- Changed encoding to base64
- Convert unix newlines to dos newlines for e-mails.
- Created custom e-mail boundary (auto generated could cause errors)

Tested the e-mails sent using several clients (Linux, Windows, MacOS, Web).
Run msglint (https://tools.ietf.org/tools/msglint/) on the generated e-mails until all errors were gone.

Lettre has changed quite a bit between alpha.1 and alpha.2; I haven't noticed any issues sending e-mails during my tests.
2020-09-11 23:52:20 +02:00
Daniel García
a0d92a167c Merge pull request #1125 from jjlin/org-cipher-visibility
Hide ciphers from non-selected collections for org owners/admins
2020-09-10 23:19:14 +02:00
Daniel García
d7b0d6f9f5 Merge pull request #1124 from aaxdev/fix/support-mobile-signalr-msgpack
Fixing MsgPack headers type and support mobile SignalR
2020-09-03 19:51:04 +02:00
Jeremy Lin
4c3b328aca Hide ciphers from non-selected collections for org owners/admins
If org owners/admins set their org access to only include selected
collections, then ciphers from non-selected collections shouldn't
appear in "My Vault". This matches the upstream behavior.
2020-09-01 02:20:25 -07:00
aaxdev
260ffee093 Improving code 2020-08-31 22:20:21 +02:00
aaxdev
c59cfe3371 Fix MsgPack headers and support mobile SignalR 2020-08-31 19:05:07 +02:00
Daniel García
0822c0c128 Update admin page dependencies 2020-08-31 16:40:21 +02:00
Daniel García
57a88f0a1b Merge pull request #1119 from mqus/clarify-multiuser-docs
Clarify multiuser capabilities
2020-08-30 15:44:36 +02:00
Markus Richter
87393409f9 Clean up misunderstandings by removing the single-user point 2020-08-29 12:47:16 +02:00
Daniel García
062f5e4712 Update dependencies 2020-08-28 22:11:55 +02:00
Daniel García
aaba1e8368 Fix some clippy warnings and remove unused function 2020-08-28 22:10:28 +02:00
Daniel García
ff2684dfee Merge pull request #1116 from jjlin/docker
Fix the Alpine build
2020-08-27 12:34:36 +02:00
Jeremy Lin
6b5fa201aa Fix the Alpine build 2020-08-26 23:44:34 -07:00
Daniel García
7167e443ca Merge pull request #1115 from jjlin/favorites
Delete associated favorites when deleting a cipher or user
2020-08-26 17:54:50 +02:00
Jeremy Lin
175d647e47 Delete associated favorites when deleting a cipher or user
This prevents foreign key constraint violations.
2020-08-26 01:27:38 -07:00
Daniel García
4c324e1160 Change Dockerfiles to make the AMD image multidb 2020-08-24 20:58:00 +02:00
Daniel García
0365b7c6a4 Add support for multiple simultaneous database features by using macros.
Diesel requires the following changes:
- Separate connection and pool types per connection, the generate_connections! macro generates an enum with a variant per db type
- Separate migrations and schemas, these were always imported as one type depending on db feature, now they are all imported under different module names
- Separate model objects per connection, the db_object! macro generates one object for each connection with the diesel macros, a generic object, and methods to convert between the connection-specific and the generic ones
- Separate connection queries, the db_run! macro allows writing only one that gets compiled for all databases or multiple ones
2020-08-24 20:11:17 +02:00
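A much-simplified sketch of the enum-generating idea behind these macros (placeholder types and an illustrative macro, not the project's actual `generate_connections!`):

```rust
// Placeholder connection types standing in for the per-backend Diesel connections.
pub struct SqliteConn;
pub struct MysqlConn;
pub struct PostgresConn;

// Illustrative macro: one enum variant per enabled database backend, so the
// rest of the code can hold "a connection" without caring which backend it is.
macro_rules! generate_connections {
    ( $( $name:ident : $ty:ty ),+ $(,)? ) => {
        pub enum DbConn {
            $( $name($ty), )+
        }
    };
}

generate_connections! {
    Sqlite: SqliteConn,
    Mysql: MysqlConn,
    Postgres: PostgresConn,
}

fn main() {
    let _conn = DbConn::Sqlite(SqliteConn);
}
```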
Daniel García
19889187a5 Merge pull request #1106 from jjlin/favorites
Track favorites on a per-user basis
2020-08-24 00:31:48 +02:00
Daniel García
9571277c44 Merge pull request #1112 from jjlin/token-size-docs
Add more docs on the `email_token_size` setting
2020-08-24 00:25:47 +02:00
Daniel García
a202da9e23 Merge pull request #1099 from jjlin/global-domains
Sync global_domains.json with upstream
2020-08-24 00:25:33 +02:00
Daniel García
e5a77a477d Merge pull request #1111 from jjlin/token
Generate tokens more simply and uniformly
2020-08-24 00:25:12 +02:00
Jeremy Lin
c05dc50f53 Add more docs on the email_token_size setting 2020-08-22 17:35:55 -07:00
Jeremy Lin
3bbdbb832c Transfer favorite status for user-owned ciphers 2020-08-22 17:14:05 -07:00
Jeremy Lin
d9684bef6b Generate tokens more simply and uniformly 2020-08-22 16:07:53 -07:00
Jeremy Lin
db0c45c172 Sync global_domains.json to bitwarden/server@8383a08 (Yandex) 2020-08-20 03:31:21 -07:00
Jeremy Lin
ad4393e3f7 Sync global_domains.json to bitwarden/server@80f57d2 (Amazon updates) 2020-08-20 03:30:39 -07:00
Jeremy Lin
f83a8a36d1 Track favorites on a per-user basis
Currently, favorites are tracked at the cipher level. For org-owned ciphers,
this means that if one user sets it as a favorite, it automatically becomes a
favorite for all other users that the cipher has been shared with.
2020-08-19 02:32:58 -07:00
Jeremy Lin
0e9eba8c8b Maximize similarity between MySQL and SQLite/PostgreSQL schemas
In particular, Diesel aliases `Varchar` to `Text`, and `Blob` to `Binary`:

* https://docs.diesel.rs/diesel/sql_types/struct.Text.html
* https://docs.diesel.rs/diesel/sql_types/struct.Binary.html
2020-08-19 02:32:56 -07:00
Jeremy Lin
d5c760960a Sync global_domains.json to bitwarden/server@af85e17 (eBay India updates) 2020-08-19 00:40:59 -07:00
Jeremy Lin
2c6ef2bc68 Sync global_domains.json to bitwarden/server@2c43019 (eBay updates) 2020-08-15 01:34:12 -07:00
Jeremy Lin
7032ae5587 Sync global_domains.json to bitwarden/server@6aed80a (Amazon updates) 2020-08-15 01:32:56 -07:00
Daniel García
eba22c2d94 Merge pull request #1095 from jjlin/db-docs
Add more doc comments for MySQL/PostgreSQL connection URIs
2020-08-13 22:34:46 +02:00
Daniel García
11cc9ae0c0 Merge pull request #1094 from jjlin/master
Sync global_domains.json to bitwarden/server@61b11e3
2020-08-13 22:34:20 +02:00
Daniel García
fb648db47d Merge pull request #1096 from mqus/mqus-env-document-override
Add a note that settings in .env can be overridden
2020-08-13 22:34:03 +02:00
mqus
959283d333 Add a note that settings in .env can be overridden 2020-08-13 17:49:25 +02:00
Jeremy Lin
385c2227e7 Add more doc comments for MySQL/PostgreSQL connection URIs
Note that Diesel implements its own parser for MySQL connection URIs, so it
probably doesn't accept the full range of syntax that would be accepted by
MySQL's client libraries, whereas for PostgreSQL, Diesel simply passes the
connection string/URI to PostgreSQL's libpq for processing.
2020-08-13 02:33:22 -07:00
Jeremy Lin
6d9f03e84b Sync global_domains.json to bitwarden/server@61b11e3 2020-08-12 21:10:31 -07:00
Daniel García
6a972e4b19 Make the admin URL redirect try to use the referrer first, and use /admin when DOMAIN is not configured and the referrer check doesn't work, to allow users without DOMAIN configured to use the admin page correctly 2020-08-12 19:07:52 +02:00
Daniel García
171b174ce9 Update dependencies 2020-08-12 18:46:28 +02:00
Daniel García
93b7ded1e6 Remove unneccessary shim for backtrace 2020-08-12 18:45:26 +02:00
Daniel García
29c6b145ca Remove redundant user fetching from login 2020-08-11 16:48:15 +02:00
Daniel García
a7a479623c Merge pull request #1087 from jjlin/org-creation-users
Add support for restricting org creation to certain users
2020-08-08 16:20:15 +02:00
Daniel García
83dff9ae6e Merge pull request #1083 from jjlin/global-domains
Add a script to auto-generate the global equivalent domains JSON file
2020-08-08 16:19:30 +02:00
Daniel García
6b2cc5a3ee Merge pull request #1089 from jjlin/master
Don't push `latest-arm32v6` tag for MySQL and PostgreSQL images
2020-08-07 20:39:17 +02:00
Jeremy Lin
5247e0d773 Don't push latest-arm32v6 tag for MySQL and PostgreSQL images 2020-08-07 10:15:15 -07:00
Jeremy Lin
05b308b8b4 Sync global_domains.json with upstream 2020-08-06 12:13:40 -07:00
Jeremy Lin
9621278fca Add a script to auto-generate the global equivalent domains JSON file
The script works by reading the relevant files from the upstream Bitwarden
source repo and generating a matching JSON file. It could potentially be
integrated into the build/release process, but for now it can be run manually
as needed.
2020-08-06 12:12:32 -07:00
Jeremy Lin
570d6c8bf9 Add support for restricting org creation to certain users 2020-08-05 22:35:29 -07:00
Daniel García
ad48e9ed0f Fix unlock on desktop clients 2020-08-04 15:12:04 +02:00
Daniel García
f724addf9a Merge pull request #1076 from jjlin/soft-delete
Fix soft delete notifications
2020-07-28 17:44:33 +02:00
Daniel García
aa20974703 Merge pull request #1075 from jjlin/master
Push an extra `latest-arm32v6` tag
2020-07-28 17:43:59 +02:00
Jeremy Lin
a846f6c610 Fix soft delete notifications
A soft-deleted entry should now show up in the trash folder immediately
(previously, an extra sync was required).
2020-07-26 16:19:47 -07:00
Jeremy Lin
c218c34812 Push an extra latest-arm32v6 tag
This fixes a gap in PR #1069.
2020-07-26 15:28:14 -07:00
Daniel García
2626e66873 Merge pull request #1069 from jjlin/master
Skip cleanup of `arm32v6` arch-specific tags
2020-07-24 23:05:29 +02:00
Jeremy Lin
81e0e1b339 Skip cleanup of arm32v6 arch-specific tags 2020-07-24 11:32:44 -07:00
Daniel García
fd1354d00e Merge pull request #1067 from jjlin/log-time-fmt
Add config option for log timestamp format
2020-07-24 16:42:10 +02:00
Jeremy Lin
071a3b2a32 Log timestamps with milliseconds by default 2020-07-23 14:19:51 -07:00
Daniel García
32cfaab5ee Updated dependencies and changed rocket request imports 2020-07-23 21:07:04 +02:00
Jeremy Lin
d348f12a0e Add config option for log timestamp format 2020-07-22 21:50:49 -07:00
Daniel García
11845d9f5b Merge pull request #1061 from jjlin/use-strip-prefix
Use `strip_prefix()` instead of `trim_start_matches()` as appropriate
2020-07-21 16:31:31 +02:00
Jeremy Lin
de70fbf88a Use strip_prefix() instead of trim_start_matches() as appropriate
As of Rust 1.45.0, `strip_prefix()` is now stable.
2020-07-20 22:33:13 -07:00
Daniel García
0b04caab78 Merge pull request #1029 from jjlin/multi-arch
Multi-arch image support
2020-07-16 22:59:12 +02:00
Jeremy Lin
4c78c5a9c9 Tag latest releases as latest and alpine 2020-07-15 20:03:34 -07:00
Jeremy Lin
73f0841f17 Clean up arch-specific tags if Docker Hub credentials are provided 2020-07-15 20:03:34 -07:00
Jeremy Lin
4559e85daa Multi-arch image support 2020-07-15 20:03:34 -07:00
Jeremy Lin
bbef332e25 Dockerfile.j2: remove dead code 2020-07-15 20:03:34 -07:00
Daniel García
1e950c7dbc Replace IP support in preparation for compiling on stable, included some tests to check that the code matches the unstable implementation 2020-07-15 00:00:03 +02:00
Daniel García
f14e19a3d8 Don't compile the regexes each time 2020-07-14 21:58:27 +02:00
Daniel García
668d5c23dc Removed try_trait and some formatting, particularly around imports 2020-07-14 18:34:22 +02:00
Daniel García
fb6f96f5c3 Updated dependencies 2020-07-14 16:08:11 +02:00
Daniel García
6e6e34ff18 Merge pull request #1055 from jjlin/pg
Fix error in PostgreSQL build
2020-07-11 11:17:45 +02:00
Jeremy Lin
790146bfac Fix error in PostgreSQL build 2020-07-10 17:23:02 -07:00
Daniel García
af625930d6 Merge pull request #1049 from jjlin/local-tz
Use local time in email notifications for new device logins
2020-07-08 19:39:17 +02:00
Jeremy Lin
a28ebcb401 Use local time in email notifications for new device logins
In this implementation, the `TZ` environment variable must be set
in order for the formatted output to use a more user-friendly
time zone abbreviation (e.g., `UTC`). Otherwise, the output uses
the time zone's UTC offset (e.g., `+00:00`).
2020-07-07 21:30:18 -07:00
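A minimal chrono-based sketch of the conversion (the format string and function name are illustrative; the TZ-abbreviation behaviour described above depends on the environment):

```rust
use chrono::{DateTime, Local, Utc};

// Illustrative formatter: convert a UTC timestamp to the server's local time
// zone before rendering it into the new-device notification e-mail.
fn format_for_email(dt: DateTime<Utc>) -> String {
    dt.with_timezone(&Local)
        .format("%Y-%m-%d %H:%M:%S %z")
        .to_string()
}

fn main() {
    println!("{}", format_for_email(Utc::now()));
}
```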
Daniel García
77e47ddd1f Merge pull request #1042 from jjlin/hide-passwords
Add support for hiding passwords in a collection
2020-07-06 18:56:06 +02:00
Daniel García
5b620ba6cd Merge pull request #1048 from jjlin/init
Add startup script to support init operations
2020-07-06 18:15:18 +02:00
Jeremy Lin
d5f9b33f66 Add startup script to support init operations
This is useful for making local customizations upon container start. To use
this feature, mount a script into the container as `/etc/bitwarden_rs.sh`
and/or a directory of scripts as `/etc/bitwarden_rs.d`. In the latter case,
only files with an `.sh` extension are sourced, so files with other
extensions (e.g., data/config files) can reside in the same dir.

Note that the init scripts are run each time the container starts (not just
the first time), so these scripts should be idempotent.
2020-07-05 15:26:20 -07:00
Daniel García
596c9b8691 Add option to set name during HELO in email settings 2020-07-05 01:59:15 +02:00
Daniel García
d4357eb55a Updated dependencies ans web vault version 2020-07-05 01:38:16 +02:00
Daniel García
b37f0dfde3 Merge pull request #1044 from ArmaanT/master
Allow postgres:// in DATABASE_URL
2020-07-05 01:07:29 +02:00
Armaan Tobaccowalla
624791e09a Allow postgres:// DATABASE_URL 2020-07-04 16:13:27 -04:00
Jeremy Lin
f9a73a9bbe More cipher optimization/cleanup 2020-07-03 10:49:10 -07:00
Jeremy Lin
35868dd72c Optimize cipher queries 2020-07-03 09:00:33 -07:00
Jeremy Lin
979d010dc2 Add support for hiding passwords in a collection
Ref: https://github.com/bitwarden/server/pull/743
2020-07-02 21:51:20 -07:00
Daniel García
b34d548246 Update dependencies 2020-06-22 17:15:20 +02:00
Daniel García
a87646b8cb Some format changes to main.rs 2020-06-15 23:40:39 +02:00
Daniel García
a2411eef56 Updated dependencies 2020-06-15 23:04:52 +02:00
Daniel García
52ed8e4d75 Merge pull request #1026 from BlackDex/issue-1022
Fixes #1022 cloning with attachments
2020-06-07 19:53:47 +02:00
BlackDex
24c914799d Fixes #1022 cloning with attachments
When a cipher has one or more attachments, it couldn't be cloned.
This commit fixes that issue.
2020-06-07 17:57:04 +02:00
Daniel García
db53511855 Merge pull request #1020 from BlackDex/admin-interface
Fixed wrong status if there is an update.
2020-06-04 18:50:00 +02:00
BlackDex
325691e588 Fixed wrong status if there is an update.
- Check the SHA hash first to see whether it is also in the server version.
- Added a badge to show if you are on a branched build.
2020-06-04 17:05:17 +02:00
Daniel García
fac3cb687d Merge pull request #1019 from xoxys/master
Add back openssl crate
2020-06-04 01:24:28 +02:00
Robert Kaussow
afbf1db331 add back openssl crate 2020-06-04 01:21:30 +02:00
Daniel García
1aefaec297 Merge pull request #1018 from BlackDex/admin-interface
Admin interface
2020-06-03 22:48:03 +02:00
Daniel García
f1d3fb5d40 Merge pull request #1017 from dprobinson/patch-1
Added missing ENV Variable for Implicit TLS
2020-06-03 22:47:53 +02:00
BlackDex
ac2723f898 Updated Organizations overview
- Changed HTML to match users overview
- Added User count
- Added Org cipher amount
- Added Attachment count and size
2020-06-03 20:37:31 +02:00
BlackDex
2fffaec226 Added attachment info per user and some layout fix
- Added the amount and size of the attachments per user
- Changed the items count function a bit
- Some small layout changes
2020-06-03 17:57:03 +02:00
BlackDex
5c54dfee3a Fixed an issue when DNS resolving fails.
When DNS resolving fails, checking for new versions causes a huge delay
and, in the end, a timeout when loading the page.

- Check if DNS resolving failed; if that is the case, do not check for
  new versions
- Changed `fn get_github_api` to make use of structs
- Added a timeout of 10 seconds for the version check requests
- Moved the "Unknown" labels to the "Latest" label
2020-06-03 17:07:32 +02:00
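A sketch of the guarded, time-limited version check (using reqwest's blocking client as an assumption; the function name is illustrative, and the 10-second timeout mirrors the description above):

```rust
use std::time::Duration;

// Illustrative version check: skip it entirely when DNS resolution is known to
// be broken, and cap the HTTP request at 10 seconds so the page can't hang.
fn fetch_latest_release(url: &str, dns_works: bool) -> Option<String> {
    if !dns_works {
        return None;
    }
    let client = reqwest::blocking::Client::builder()
        .timeout(Duration::from_secs(10))
        .build()
        .ok()?;
    client.get(url).send().ok()?.text().ok()
}

fn main() {
    let latest = fetch_latest_release(
        "https://api.github.com/repos/dani-garcia/bitwarden_rs/releases/latest",
        true,
    );
    println!("{:?}", latest.map(|body| body.len()));
}
```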
David P Robinson
967d2d78ec Added missing ENV Variable for Implicit TLS 2020-06-02 23:46:26 +01:00
Daniel García
1aa5e0d4dc Merge pull request #1012 from BlackDex/admin-interface
Updated js/css libraries and fixed smallscreen err
2020-06-01 20:07:13 +02:00
BlackDex
b47cf97409 Updated js/css libraries and fixed smallscreen err
- Updated bootstrap js and css to the latest version
- Fixed issue with small-screens where the menu overlaps the token input
  - The menu now collapses to a hamburger menu
  - Menus only accessible when logged in are hidden when you are not
- Changed Users Overview to use a table to prevent small-screen issues.
2020-06-01 18:58:38 +02:00
Daniel García
5e802f8aa3 Update lettre to alpha release instead of git commit, and update the rest of dependencies while we are at it 2020-05-31 17:58:06 +02:00
Daniel García
0bdeb02a31 Merge pull request #1009 from jjlin/email-subject
Don't HTML-escape email subject lines
2020-05-31 00:22:58 +02:00
Daniel García
b03698fadb Merge pull request #1010 from jjlin/admin-url
Avoid double-slashes in the admin URL
2020-05-31 00:22:46 +02:00
Jeremy Lin
39d1a09704 Avoid double-slashes in the admin URL 2020-05-30 01:06:40 -07:00
Jeremy Lin
a447e4e7ef Don't HTML-escape email subject lines
For example, this causes org names like `X&Y` to appear as `X&amp;Y`.
2020-05-30 00:36:43 -07:00
Daniel García
4eee6e7aee Merge pull request #1007 from BlackDex/admin-interface
Admin interface restyle
2020-05-28 20:54:11 +02:00
BlackDex
b6fde857a7 Added version check to diagnostics
- Added a version check based upon the github api information.
2020-05-28 20:25:25 +02:00
BlackDex
3c66deb5cc Redesign of the admin interface.
Main changes:
 - Split up settings and users into two separate pages.
 - Added verified shield when the e-mail address has been verified.
 - Added the amount of personal items in the database to the users overview.
 - Added Organizations and Diagnostics pages.
   - Shows if DNS resolving works.
   - Shows if there is a possible time drift.
   - Shows current versions of server and web-vault.
 - Optimized logo-gray.png using optipng

Items which can be added later:
 - Amount of cipher items accessible for a user, not only his personal items.
 - Amount of users per Org
 - Version update check in the diagnostics overview.
 - Copy/Pasteable runtime config which has sensitive data changed or removed for support questions either on the forum or github issues.
 - Option to delete Orgs and all its passwords (when there are no members anymore).
 - Etc....
2020-05-28 10:46:25 +02:00
Daniel García
4146612a32 Merge pull request #1006 from jjlin/email-change
Allow email changes for existing accounts even when signups are disabled
2020-05-27 18:18:21 +02:00
Jeremy Lin
a314933557 Allow email changes for existing accounts even when signups are disabled 2020-05-24 14:38:19 -07:00
Daniel García
c5d7e3f2bc Merge pull request #1003 from frdescam/fix_arm_displaysize
Use format! for rounding to fix arm issue
2020-05-23 13:10:06 +02:00
Daniel García
c95a2881b5 Merge pull request #998 from frdescam/fix_email_templates
Fixing bad width in 2FA email template
2020-05-23 13:09:44 +02:00
fdeĉ
4c3727b4a3 use format! for rounding to fix arm issue 2020-05-22 12:10:56 +02:00
Daniel García
a1f304dff7 Update web vault to v2.14.0 2020-05-21 22:49:15 +02:00
Daniel García
a8870eef0d Convert to f32 before rounding to fix arm issue 2020-05-20 17:58:39 +02:00
François
afaebc6cf3 fixing hard coded width email templates 2020-05-20 13:38:04 +02:00
François
8f4a1f4fc2 fixing bad width in 2FA email template 2020-05-18 12:27:21 +02:00
Daniel García
0807783388 Add ip on totp miss 2020-05-14 00:19:50 +02:00
Daniel García
80d4061d14 Update dependencies 2020-05-14 00:18:18 +02:00
Daniel García
dc2f8e5c85 Merge pull request #994 from jjlin/help-text
Update startup banner to direct usage/config questions to the forum
2020-05-13 22:34:30 +02:00
Daniel García
aee1ea032b Merge pull request #989 from theycallmesteve/update_responses
Update responses
2020-05-13 22:34:16 +02:00
Daniel García
484e82fb9f Merge pull request #988 from theycallmesteve/rename_functions
Rename functions
2020-05-13 22:34:06 +02:00
Jeremy Lin
322a08edfb Update startup banner to direct usage/config questions to the forum 2020-05-13 12:29:47 -07:00
theycallmesteve
08afc312c3 Add missing items to profileOrganization response model 2020-05-08 13:39:17 -04:00
theycallmesteve
5571a5d8ed Update post_keys to return a keys response model 2020-05-08 13:38:49 -04:00
theycallmesteve
6a8c65493f Rename collection_user_details to collection_read_only to reflect the response model 2020-05-08 13:37:40 -04:00
theycallmesteve
dfdf4473ea Rename to_json_list to to_json_provder to reflect the response model 2020-05-08 13:36:35 -04:00
Daniel García
8bbbff7567 Merge pull request #987 from theycallmesteve/global_domains
GlobalEquivalentDomains updates from upstream bitwarden
2020-05-08 01:04:10 +02:00
theycallmesteve
42e37ebea1 Apply upstream global domain values and whitespace fixes 2020-05-07 18:05:17 -04:00
theycallmesteve
632f4d5453 Whitespace fixes 2020-05-07 18:02:37 -04:00
Daniel García
6c5e35ce5c Change the mails content types to more closely match what we sent before 2020-05-07 00:51:46 +02:00
Daniel García
4ff15f6dc2 Merge pull request #978 from AltiUP/patch-1
Delete the call to the map file
2020-05-03 22:30:06 +02:00
Daniel García
ec8028aef2 Merge pull request #979 from jjlin/admin-redirect
Use absolute URIs for admin page redirects
2020-05-03 22:27:09 +02:00
Daniel García
63cbd9ef9c Update lettre to latest master 2020-05-03 17:41:53 +02:00
Daniel García
9cca64003a Remove unused dependency and simple feature, update dependencies and fix some clippy lints 2020-05-03 17:24:51 +02:00
Jeremy Lin
819d5e2dc8 Use absolute URIs for admin page redirects
This is technically required per RFC 2616 (HTTP/1.1); some proxies will
rewrite a plain `/admin` path to an unexpected URL otherwise.
2020-05-01 00:31:47 -07:00
Christophe Gherardi
3b06ab296b Delete the call to the map file
The file bootstrap.css.map is missing, the reference can be deleted.
2020-04-30 19:41:58 +02:00
Daniel García
0de52c6c99 Merge pull request #957 from jjlin/domain-whitelist
Domain whitelist cleanup and fixes
2020-04-18 12:08:48 +02:00
Daniel García
e3b00b59a7 Initial support for soft deletes 2020-04-17 22:35:27 +02:00
Daniel García
5a390a973f Merge pull request #966 from BlackDex/issue-965
Fixed issue #965
2020-04-15 17:15:59 +02:00
BlackDex
1ee8e44912 Fixed issue #965
PostgreSQL updates/inserts ignored None/null values.
This is nice for new entries, but not for updates.
Added a derive option to always include these none/null values for Option<>
variables.

This solves issue #965
2020-04-15 16:49:33 +02:00
Jeremy Lin
86685c1cd2 Ensure email domain comparison is case-insensitive 2020-04-11 14:51:36 -07:00
Daniel García
e3feba2a2c Merge pull request #960 from jjlin/admin-token
Warn on empty `ADMIN_TOKEN` instead of bailing out
2020-04-11 23:34:37 +02:00
Jeremy Lin
0a68de6c24 Warn on empty ADMIN_TOKEN instead of bailing out
The admin page will still be disabled.

Fixes #849.
2020-04-09 20:55:08 -07:00
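A rough sketch of the described behaviour (names are illustrative, not the actual config handling):

fn admin_page_enabled(admin_token: Option<&str>) -> bool {
    match admin_token {
        Some(token) if !token.trim().is_empty() => true,
        Some(_) => {
            // Empty value: warn instead of refusing to start; the admin page stays disabled.
            eprintln!("WARNING: ADMIN_TOKEN is empty, the admin page will be disabled");
            false
        }
        None => false,
    }
}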
Daniel García
4be8dae626 Make web vault show a more informative error when browsers block WebCrypto in insecure contexts and update dependencies 2020-04-09 22:54:31 +02:00
Jeremy Lin
e4d08836e2 Make org owner invitations respect the email domain whitelist
This closes a loophole where org owners can invite new users from any domain.
2020-04-09 01:51:05 -07:00
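A rough sketch of such a check, comparing the invited address's domain against the whitelist case-insensitively (helper name and types are illustrative, not the project's actual code):

fn invite_allowed(email: &str, whitelist: &[String]) -> bool {
    email
        .rsplit('@')
        .next()
        .map(|domain| whitelist.iter().any(|allowed| allowed.eq_ignore_ascii_case(domain)))
        .unwrap_or(false)
}
// invite_allowed("user@Example.COM", &["example.com".to_string()]) == true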
Jeremy Lin
c2a324e5da Clean up domain whitelist logic
* Make `SIGNUPS_DOMAINS_WHITELIST` override the `SIGNUPS_ALLOWED` setting.
  Otherwise, a common pitfall is to set `SIGNUPS_DOMAINS_WHITELIST` without
  realizing that `SIGNUPS_ALLOWED=false` must also be set.

* Whitespace is now accepted in `SIGNUPS_DOMAINS_WHITELIST`. That is,
  `foo.com, bar.com` is now equivalent to `foo.com,bar.com`.

* Add validation on `SIGNUPS_DOMAINS_WHITELIST`. For example, `foo.com,`
  is rejected as containing an empty token.
2020-04-09 01:42:27 -07:00
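A minimal sketch of whitelist parsing consistent with the rules described above: surrounding whitespace is accepted and empty tokens are rejected (illustrative only, not the project's actual parser):

fn parse_domains_whitelist(raw: &str) -> Result<Vec<String>, String> {
    let mut domains = Vec::new();
    for token in raw.split(',') {
        let domain = token.trim(); // "foo.com, bar.com" is treated like "foo.com,bar.com"
        if domain.is_empty() {
            // e.g. "foo.com," is rejected as containing an empty token
            return Err(format!("empty domain in SIGNUPS_DOMAINS_WHITELIST: {:?}", raw));
        }
        domains.push(domain.to_lowercase());
    }
    Ok(domains)
}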
Daniel García
77f95146d6 Merge pull request #956 from jjlin/duo
Fix Duo auth failure with non-lowercased email addresses
2020-04-08 08:43:24 +02:00
Jeremy Lin
6cd8512bbd Fix Duo auth failure with non-lowercased email addresses 2020-04-07 20:40:51 -07:00
Daniel García
843604c9e7 Merge pull request #939 from jjlin/attachment-size
Fix attachment size limit calculation
2020-03-31 12:56:49 +02:00
Jeremy Lin
7407b8326a Fix attachment size limit calculation
The config values (in KB) need to be converted to bytes when comparing
against total attachment sizes.
2020-03-31 02:30:28 -07:00
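In other words, limits configured in kilobytes need a KB-to-bytes conversion before being compared with summed attachment sizes; a minimal illustration (not the project's actual code):

fn exceeds_limit(limit_kb: i64, current_total_bytes: i64, new_attachment_bytes: i64) -> bool {
    let limit_bytes = limit_kb * 1024; // config value is in KB
    current_total_bytes + new_attachment_bytes > limit_bytes
}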
Daniel García
adf47827c9 Make sure the data field is always returned, otherwise the mobile apps seem to have issues 2020-03-30 22:19:50 +02:00
Daniel García
5471088e93 Merge pull request #933 from jjlin/dockerfiles
Rebuild Dockerfiles to match latest Dockerfile.j2 template
2020-03-27 17:45:10 +01:00
Daniel García
4e85a1dee1 Update web vault to 2.13.2 2020-03-27 17:44:10 +01:00
Daniel García
ec60839064 Merge pull request #932 from jjlin/ws-fix
Fix WebSocket notifications
2020-03-27 08:38:54 +01:00
Jeremy Lin
d4bfa1a189 Rebuild Dockerfiles to match latest Dockerfile.j2 template
Picks up a couple of missed changes from b837348b and ccf6ee79.
2020-03-26 20:10:33 -07:00
Jeremy Lin
862d401077 Fix WebSocket notifications
Ignore a missing `id` query param; it's unclear what this ID represents,
but it wasn't being used in the existing bitwarden_rs code, and no longer
seems to be sent in the latest versions of the official clients.
2020-03-26 19:26:44 -07:00
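A small sketch of treating the `id` query parameter as optional (illustrative only; the real handler works on the WebSocket handshake request):

fn optional_id_param(query: &str) -> Option<String> {
    // A missing `id` yields None; the connection is accepted either way.
    query
        .split('&')
        .find_map(|pair| pair.strip_prefix("id=").map(|id| id.to_string()))
}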
Daniel García
255a06382d Merge pull request #928 from jjlin/healthcheck
Healthcheck fixes/optimizations
2020-03-26 21:13:31 +01:00
Jeremy Lin
bbb0484d03 Healthcheck fixes/optimizations
* Switch healthcheck interval/timeout from 30s/3s to 60s/10s.
  30s interval is arguably overkill, and 3s timeout is definitely too short
  for lower end machines.
* Use HEALTHCHECK CMD exec form to avoid superfluous `sh` invocations.
* Add `--silent --show-error` flags to curl call to avoid progress meter being
  shown in healthcheck logs.
2020-03-25 20:13:36 -07:00
Daniel García
93346bc05d Merge pull request #927 from jjlin/healthcheck
Update healthcheck script to handle alternate base dir
2020-03-25 22:21:08 +01:00
Jeremy Lin
fdf50f0064 Update healthcheck script to handle alternate base dir 2020-03-24 20:00:35 -07:00
Daniel García
ccf6ee79d0 Update dependencies, mainly diesel and sqlite 2020-03-24 20:36:19 +01:00
Daniel García
91dd19473d Merge pull request #922 from jjlin/device-push-token
Handle `devicePushToken`
2020-03-23 00:03:10 +01:00
Jeremy Lin
c06162b22f Handle devicePushToken
Mobile push isn't currently supported, but this should get rid of spurious
`Detected unexpected parameter during login: devicepushtoken` warnings.
2020-03-22 15:04:25 -07:00
Daniel García
7a6a3e4160 Set the cargo version and allow changing it during build time with BWRS_VERSION.
Also renamed GIT_VERSION because that's not the only source anymore.
2020-03-22 16:13:34 +01:00
170 changed files with 35829 additions and 7629 deletions

View File

@@ -3,6 +3,7 @@ target
# Data folder
data
.env
# IDE files
.vscode
@@ -10,5 +11,15 @@ data
*.iml
# Documentation
.github
*.md
*.txt
*.yml
*.yaml
# Docker folders
hooks
tools
# Web vault
web-vault

View File

@@ -1,19 +1,34 @@
## Bitwarden_RS Configuration File ## Bitwarden_RS Configuration File
## Uncomment any of the following lines to change the defaults ## Uncomment any of the following lines to change the defaults
##
## Be aware that most of these settings will be overridden if they were changed
## in the admin interface. Those overrides are stored within DATA_FOLDER/config.json .
## Main data folder ## Main data folder
# DATA_FOLDER=data # DATA_FOLDER=data
## Database URL ## Database URL
## When using SQLite, this is the path to the DB file, default to %DATA_FOLDER%/db.sqlite3 ## When using SQLite, this is the path to the DB file, default to %DATA_FOLDER%/db.sqlite3
## When using MySQL, this it is the URL to the DB, including username and password:
## Format: mysql://[user[:password]@]host/database_name
# DATABASE_URL=data/db.sqlite3 # DATABASE_URL=data/db.sqlite3
## When using MySQL, specify an appropriate connection URI.
## Details: https://docs.diesel.rs/diesel/mysql/struct.MysqlConnection.html
# DATABASE_URL=mysql://user:password@host[:port]/database_name
## When using PostgreSQL, specify an appropriate connection URI (recommended)
## or keyword/value connection string.
## Details:
## - https://docs.diesel.rs/diesel/pg/struct.PgConnection.html
## - https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-CONNSTRING
# DATABASE_URL=postgresql://user:password@host[:port]/database_name
## Database max connections
## Define the size of the connection pool used for connecting to the database.
# DATABASE_MAX_CONNS=10
## Individual folders, these override %DATA_FOLDER% ## Individual folders, these override %DATA_FOLDER%
# RSA_KEY_FILENAME=data/rsa_key # RSA_KEY_FILENAME=data/rsa_key
# ICON_CACHE_FOLDER=data/icon_cache # ICON_CACHE_FOLDER=data/icon_cache
# ATTACHMENTS_FOLDER=data/attachments # ATTACHMENTS_FOLDER=data/attachments
# SENDS_FOLDER=data/sends
## Templates data folder, by default uses embedded templates ## Templates data folder, by default uses embedded templates
## Check source code to see the format ## Check source code to see the format
@@ -44,6 +59,10 @@
## Enable extended logging, which shows timestamps and targets in the logs ## Enable extended logging, which shows timestamps and targets in the logs
# EXTENDED_LOGGING=true # EXTENDED_LOGGING=true
## Timestamp format used in extended logging.
## Format specifiers: https://docs.rs/chrono/latest/chrono/format/strftime
# LOG_TIMESTAMP_FORMAT="%Y-%m-%d %H:%M:%S.%3f"
## Logging to file ## Logging to file
## It's recommended to also set 'ROCKET_CLI_COLORS=off' ## It's recommended to also set 'ROCKET_CLI_COLORS=off'
# LOG_FILE=/path/to/log # LOG_FILE=/path/to/log
@@ -56,7 +75,7 @@
## Log level ## Log level
## Change the verbosity of the log output ## Change the verbosity of the log output
## Valid values are "trace", "debug", "info", "warn", "error" and "off" ## Valid values are "trace", "debug", "info", "warn", "error" and "off"
## Setting it to "trace" or "debug" would also show logs for mounted ## Setting it to "trace" or "debug" would also show logs for mounted
## routes and static file, websocket and alive requests ## routes and static file, websocket and alive requests
# LOG_LEVEL=Info # LOG_LEVEL=Info
@@ -68,6 +87,10 @@
## cause performance degradation or might render the service unable to start. ## cause performance degradation or might render the service unable to start.
# ENABLE_DB_WAL=true # ENABLE_DB_WAL=true
## Database connection retries
## Number of times to retry the database connection during startup, with 1 second delay between each retry, set to 0 to retry indefinitely
# DB_CONNECTION_RETRIES=15
## Disable icon downloading ## Disable icon downloading
## Set to true to disable icon downloading, this would still serve icons from $ICON_CACHE_FOLDER, ## Set to true to disable icon downloading, this would still serve icons from $ICON_CACHE_FOLDER,
## but it won't produce any external network request. Needs to set $ICON_CACHE_TTL to 0, ## but it won't produce any external network request. Needs to set $ICON_CACHE_TTL to 0,
@@ -82,10 +105,11 @@
## Icon blacklist Regex ## Icon blacklist Regex
## Any domains or IPs that match this regex won't be fetched by the icon service. ## Any domains or IPs that match this regex won't be fetched by the icon service.
## Useful to hide other servers in the local network. Check the WIKI for more details ## Useful to hide other servers in the local network. Check the WIKI for more details
# ICON_BLACKLIST_REGEX=192\.168\.1\.[0-9].*^ ## NOTE: Always enclose this regex withing single quotes!
# ICON_BLACKLIST_REGEX='^(192\.168\.0\.[0-9]+|192\.168\.1\.[0-9]+)$'
## Any IP which is not defined as a global IP will be blacklisted. ## Any IP which is not defined as a global IP will be blacklisted.
## Usefull to secure your internal environment: See https://en.wikipedia.org/wiki/Reserved_IP_addresses for a list of IPs which it will block ## Useful to secure your internal environment: See https://en.wikipedia.org/wiki/Reserved_IP_addresses for a list of IPs which it will block
# ICON_BLACKLIST_NON_GLOBAL_IPS=true # ICON_BLACKLIST_NON_GLOBAL_IPS=true
## Disable 2FA remember ## Disable 2FA remember
@@ -93,6 +117,18 @@
## Note that the checkbox would still be present, but ignored. ## Note that the checkbox would still be present, but ignored.
# DISABLE_2FA_REMEMBER=false # DISABLE_2FA_REMEMBER=false
## Maximum attempts before an email token is reset and a new email will need to be sent.
# EMAIL_ATTEMPTS_LIMIT=3
## Token expiration time
## Maximum time in seconds a token is valid. The time the user has to open email client and copy token.
# EMAIL_EXPIRATION_TIME=600
## Email token size
## Number of digits in an email token (min: 6, max: 19).
## Note that the Bitwarden clients are hardcoded to mention 6 digit codes regardless of this setting!
# EMAIL_TOKEN_SIZE=6
## Controls if new users can register ## Controls if new users can register
# SIGNUPS_ALLOWED=true # SIGNUPS_ALLOWED=true
@@ -114,6 +150,14 @@
## even if SIGNUPS_ALLOWED is set to false ## even if SIGNUPS_ALLOWED is set to false
# SIGNUPS_DOMAINS_WHITELIST=example.com,example.net,example.org # SIGNUPS_DOMAINS_WHITELIST=example.com,example.net,example.org
## Controls which users can create new orgs.
## Blank or 'all' means all users can create orgs (this is the default):
# ORG_CREATION_USERS=
## 'none' means no users can create orgs:
# ORG_CREATION_USERS=none
## A comma-separated list means only those users can create orgs:
# ORG_CREATION_USERS=admin1@example.com,admin2@example.com
## Token for the admin interface, preferably use a long random string ## Token for the admin interface, preferably use a long random string
## One option is to use 'openssl rand -base64 48' ## One option is to use 'openssl rand -base64 48'
## If not set, the admin panel is disabled ## If not set, the admin panel is disabled
@@ -125,6 +169,16 @@
## Invitations org admins to invite users, even when signups are disabled ## Invitations org admins to invite users, even when signups are disabled
# INVITATIONS_ALLOWED=true # INVITATIONS_ALLOWED=true
## Name shown in the invitation emails that don't come from a specific organization
# INVITATION_ORG_NAME=Bitwarden_RS
## Per-organization attachment limit (KB)
## Limit in kilobytes for an organization attachments, once the limit is exceeded it won't be possible to upload more
# ORG_ATTACHMENT_LIMIT=
## Per-user attachment limit (KB).
## Limit in kilobytes for a users attachments, once the limit is exceeded it won't be possible to upload more
# USER_ATTACHMENT_LIMIT=
## Controls the PBBKDF password iterations to apply on the server ## Controls the PBBKDF password iterations to apply on the server
## The change only applies when the password is changed ## The change only applies when the password is changed
@@ -140,6 +194,13 @@
## For U2F to work, the server must use HTTPS, you can use Let's Encrypt for free certs ## For U2F to work, the server must use HTTPS, you can use Let's Encrypt for free certs
# DOMAIN=https://bw.domain.tld:8443 # DOMAIN=https://bw.domain.tld:8443
## Allowed iframe ancestors (Know the risks!)
## https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Security-Policy/frame-ancestors
## Allows other domains to embed the web vault into an iframe, useful for embedding into secure intranets
## This adds the configured value to the 'Content-Security-Policy' headers 'frame-ancestors' value.
## Multiple values must be separated with a whitespace.
# ALLOWED_IFRAME_ANCESTORS=
## Yubico (Yubikey) Settings ## Yubico (Yubikey) Settings
## Set your Client ID and Secret Key for Yubikey OTP ## Set your Client ID and Secret Key for Yubikey OTP
## You can generate it here: https://upgrade.yubico.com/getapikey/ ## You can generate it here: https://upgrade.yubico.com/getapikey/
@@ -162,7 +223,7 @@
## Authenticator Settings ## Authenticator Settings
## Disable authenticator time drifted codes to be valid. ## Disable authenticator time drifted codes to be valid.
## TOTP codes of the previous and next 30 seconds will be invalid ## TOTP codes of the previous and next 30 seconds will be invalid
## ##
## According to the RFC6238 (https://tools.ietf.org/html/rfc6238), ## According to the RFC6238 (https://tools.ietf.org/html/rfc6238),
## we allow by default the TOTP code which was valid one step back and one in the future. ## we allow by default the TOTP code which was valid one step back and one in the future.
## This can however allow attackers to be a bit more lucky with there attempts because there are 3 valid codes. ## This can however allow attackers to be a bit more lucky with there attempts because there are 3 valid codes.
@@ -183,11 +244,45 @@
# SMTP_HOST=smtp.domain.tld # SMTP_HOST=smtp.domain.tld
# SMTP_FROM=bitwarden-rs@domain.tld # SMTP_FROM=bitwarden-rs@domain.tld
# SMTP_FROM_NAME=Bitwarden_RS # SMTP_FROM_NAME=Bitwarden_RS
# SMTP_PORT=587 # SMTP_PORT=587 # Ports 587 (submission) and 25 (smtp) are standard without encryption and with encryption via STARTTLS (Explicit TLS). Port 465 is outdated and used with Implicit TLS.
# SMTP_SSL=true # SMTP_SSL=true # (Explicit) - This variable by default configures Explicit STARTTLS, it will upgrade an insecure connection to a secure one. Unless SMTP_EXPLICIT_TLS is set to true. Either port 587 or 25 are default.
# SMTP_EXPLICIT_TLS=true # (Implicit) - N.B. This variable configures Implicit TLS. It's currently mislabelled (see bug #851) - SMTP_SSL Needs to be set to true for this option to work. Usually port 465 is used here.
# SMTP_USERNAME=username # SMTP_USERNAME=username
# SMTP_PASSWORD=password # SMTP_PASSWORD=password
# SMTP_AUTH_MECHANISM="Plain"
# SMTP_TIMEOUT=15 # SMTP_TIMEOUT=15
## Defaults for SSL is "Plain" and "Login" and nothing for Non-SSL connections.
## Possible values: ["Plain", "Login", "Xoauth2"].
## Multiple options need to be separated by a comma ','.
# SMTP_AUTH_MECHANISM="Plain"
## Server name sent during the SMTP HELO
## By default this value should be is on the machine's hostname,
## but might need to be changed in case it trips some anti-spam filters
# HELO_NAME=
## SMTP debugging
## When set to true this will output very detailed SMTP messages.
## WARNING: This could contain sensitive information like passwords and usernames! Only enable this during troubleshooting!
# SMTP_DEBUG=false
## Accept Invalid Hostnames
## DANGEROUS: This option introduces significant vulnerabilities to man-in-the-middle attacks!
## Only use this as a last resort if you are not able to use a valid certificate.
# SMTP_ACCEPT_INVALID_HOSTNAMES=false
## Accept Invalid Certificates
## DANGEROUS: This option introduces significant vulnerabilities to man-in-the-middle attacks!
## Only use this as a last resort if you are not able to use a valid certificate.
## If the Certificate is valid but the hostname doesn't match, please use SMTP_ACCEPT_INVALID_HOSTNAMES instead.
# SMTP_ACCEPT_INVALID_CERTS=false
## Require new device emails. When a user logs in an email is required to be sent.
## If sending the email fails the login attempt will fail!!
# REQUIRE_DEVICE_EMAIL=false
## HIBP Api Key
## HaveIBeenPwned API Key, request it here: https://haveibeenpwned.com/API/Key
# HIBP_API_KEY=
# vim: syntax=ini # vim: syntax=ini
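The authenticator section above mentions accepting the TOTP code one step back and one step forward; a minimal sketch of that window check, with `generate` standing in for a real TOTP implementation (not included here):

fn totp_code_matches(submitted: &str, current_step: i64, generate: impl Fn(i64) -> String) -> bool {
    // Accept the previous, current and next 30-second step (3 valid codes in total).
    (-1i64..=1).any(|offset| generate(current_step + offset) == submitted)
}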

3
.gitattributes vendored Normal file
View File

@@ -0,0 +1,3 @@
# Ignore vendored scripts in GitHub stats
src/static/* linguist-vendored

View File

@@ -1,42 +1,66 @@
--- ---
name: Bug report name: Bug report
about: Create a report to help us improve about: Use this ONLY for bugs in bitwarden_rs itself. Use the Discourse forum (link below) to request features or get help with usage/configuration. If in doubt, use the forum.
title: '' title: ''
labels: '' labels: ''
assignees: '' assignees: ''
--- ---
<!--
# ###
NOTE: Please update to the latest version of bitwarden_rs before reporting an issue!
This saves you and us a lot of time and troubleshooting.
See:
* https://github.com/dani-garcia/bitwarden_rs/issues/1180
* https://github.com/dani-garcia/bitwarden_rs/wiki/Updating-the-bitwarden-image
# ###
-->
<!-- <!--
Please fill out the following template to make solving your problem easier and faster for us. Please fill out the following template to make solving your problem easier and faster for us.
This is only a guideline. If you think that parts are unneccessary for your issue, feel free to remove them. This is only a guideline. If you think that parts are unnecessary for your issue, feel free to remove them.
Remember to hide/obfuscate personal and confidential information, Remember to hide/redact personal or confidential information,
such as names, global IP/DNS adresses and especially passwords, if neccessary. such as passwords, IP addresses, and DNS names as appropriate.
--> -->
### Subject of the issue ### Subject of the issue
<!-- Describe your issue here.--> <!-- Describe your issue here. -->
### Deployment environment
<!--
=========================================================================================
Preferably, use the `Generate Support String` button on the admin page's Diagnostics tab.
That will auto-generate most of the info requested in this section.
=========================================================================================
-->
<!-- The version number, obtained from the logs (at startup) or the admin diagnostics page -->
<!-- This is NOT the version number shown on the web vault, which is versioned separately from bitwarden_rs -->
<!-- Remember to check if your issue exists on the latest version first! -->
* bitwarden_rs version:
<!-- How the server was installed: Docker image, OS package, built from source, etc. -->
* Install method:
* Clients used: <!-- web vault, desktop, Android, iOS, etc. (if applicable) -->
### Your environment
<!-- The version number, obtained from the logs or the admin page -->
* Bitwarden_rs version:
<!-- How the server was installed: Docker image / package / built from source -->
* Install method:
* Clients used: <!-- if applicable -->
* Reverse proxy and version: <!-- if applicable --> * Reverse proxy and version: <!-- if applicable -->
* Version of mysql/postgresql: <!-- if applicable -->
* Other relevant information: * MySQL/MariaDB or PostgreSQL version: <!-- if applicable -->
* Other relevant details:
### Steps to reproduce ### Steps to reproduce
<!-- Tell us how to reproduce this issue. What parameters did you set (differently from the defaults) <!-- Tell us how to reproduce this issue. What parameters did you set (differently from the defaults)
and how did you start bitwarden_rs? --> and how did you start bitwarden_rs? -->
### Expected behaviour ### Expected behaviour
<!-- Tell us what should happen --> <!-- Tell us what you expected to happen -->
### Actual behaviour ### Actual behaviour
<!-- Tell us what happens instead --> <!-- Tell us what actually happened -->
### Relevant logs ### Troubleshooting data
<!-- Share some logfiles, screenshots or output of relevant programs with us. --> <!-- Share any log files, screenshots, or other relevant troubleshooting data -->

8
.github/ISSUE_TEMPLATE/config.yml vendored Normal file
View File

@@ -0,0 +1,8 @@
blank_issues_enabled: false
contact_links:
- name: Discourse forum for bitwarden_rs
url: https://bitwardenrs.discourse.group/
about: Use this forum to request features or get help with usage/configuration.
- name: GitHub Discussions for bitwarden_rs
url: https://github.com/dani-garcia/bitwarden_rs/discussions
about: An alternative to the Discourse forum, if this is easier for you.

View File

@@ -1,11 +0,0 @@
---
name: Feature request
about: Suggest an idea for this project
title: ''
labels: better for forum
assignees: ''
---
# Please submit all your feature requests to the forum
Link: https://bitwardenrs.discourse.group/c/feature-requests

View File

@@ -1,11 +0,0 @@
---
name: Help with installation/configuration
about: Any questions about the setup of bitwarden_rs
title: ''
labels: better for forum
assignees: ''
---
# Please submit all your third party help requests to the forum
Link: https://bitwardenrs.discourse.group/c/help

View File

@@ -1,11 +0,0 @@
---
name: Help with proxy/database/NAS setup
about: Any questions about third party software
title: ''
labels: better for forum
assignees: ''
---
# Please submit all your third party help requests to the forum
Link: https://bitwardenrs.discourse.group/c/third-party-help

144
.github/workflows/build.yml vendored Normal file
View File

@@ -0,0 +1,144 @@
name: Build
on:
push:
# Ignore when there are only changes done too one of these paths
paths-ignore:
- "**.md"
- "**.txt"
- "azure-pipelines.yml"
- "docker/**"
- "hooks/**"
- "tools/**"
pull_request:
# Ignore when there are only changes done too one of these paths
paths-ignore:
- "**.md"
- "**.txt"
- "azure-pipelines.yml"
- "docker/**"
- "hooks/**"
- "tools/**"
jobs:
build:
strategy:
fail-fast: false
matrix:
channel:
- nightly
# - stable
target-triple:
- x86_64-unknown-linux-gnu
# - x86_64-unknown-linux-musl
include:
- target-triple: x86_64-unknown-linux-gnu
host-triple: x86_64-unknown-linux-gnu
features: "sqlite,mysql,postgresql"
channel: nightly
os: ubuntu-18.04
ext:
# - target-triple: x86_64-unknown-linux-gnu
# host-triple: x86_64-unknown-linux-gnu
# features: "sqlite,mysql,postgresql"
# channel: stable
# os: ubuntu-18.04
# ext:
# - target-triple: x86_64-unknown-linux-musl
# host-triple: x86_64-unknown-linux-gnu
# features: "sqlite,postgresql"
# channel: nightly
# os: ubuntu-18.04
# ext:
# - target-triple: x86_64-unknown-linux-musl
# host-triple: x86_64-unknown-linux-gnu
# features: "sqlite,postgresql"
# channel: stable
# os: ubuntu-18.04
# ext:
name: Building ${{ matrix.channel }}-${{ matrix.target-triple }}
runs-on: ${{ matrix.os }}
steps:
# Checkout the repo
- name: Checkout
uses: actions/checkout@v2
# End Checkout the repo
# Install musl-tools when needed
- name: Install musl tools
run: sudo apt-get update && sudo apt-get install -y --no-install-recommends musl-dev musl-tools cmake
if: matrix.target-triple == 'x86_64-unknown-linux-musl'
# End Install musl-tools when needed
# Install dependencies
- name: Install dependencies Ubuntu
run: sudo apt-get update && sudo apt-get install -y --no-install-recommends openssl sqlite build-essential libmariadb-dev-compat libpq-dev libssl-dev pkgconf
if: startsWith( matrix.os, 'ubuntu' )
# End Install dependencies
# Enable Rust Caching
- uses: Swatinem/rust-cache@v1
# End Enable Rust Caching
# Uses the rust-toolchain file to determine version
- name: 'Install ${{ matrix.channel }}-${{ matrix.host-triple }} for target: ${{ matrix.target-triple }}'
uses: actions-rs/toolchain@v1
with:
profile: minimal
target: ${{ matrix.target-triple }}
components: clippy
# End Uses the rust-toolchain file to determine version
# Run cargo tests (In release mode to speed up future builds)
- name: '`cargo test --release --features ${{ matrix.features }} --target ${{ matrix.target-triple }}`'
uses: actions-rs/cargo@v1
with:
command: test
args: --release --features ${{ matrix.features }} --target ${{ matrix.target-triple }}
# End Run cargo tests
# Run cargo clippy (In release mode to speed up future builds)
- name: '`cargo clippy --release --features ${{ matrix.features }} --target ${{ matrix.target-triple }}`'
uses: actions-rs/cargo@v1
with:
command: clippy
args: --release --features ${{ matrix.features }} --target ${{ matrix.target-triple }}
# End Run cargo clippy
# Build the binary
- name: '`cargo build --release --features ${{ matrix.features }} --target ${{ matrix.target-triple }}`'
uses: actions-rs/cargo@v1
with:
command: build
args: --release --features ${{ matrix.features }} --target ${{ matrix.target-triple }}
# End Build the binary
# Upload artifact to Github Actions
- name: Upload artifact
uses: actions/upload-artifact@v2
with:
name: bitwarden_rs-${{ matrix.target-triple }}${{ matrix.ext }}
path: target/${{ matrix.target-triple }}/release/bitwarden_rs${{ matrix.ext }}
# End Upload artifact to Github Actions
## This is not used at the moment
## We could start using this when we can build static binaries
# Upload to github actions release
# - name: Release
# uses: Shopify/upload-to-release@1
# if: startsWith(github.ref, 'refs/tags/')
# with:
# name: bitwarden_rs-${{ matrix.target-triple }}${{ matrix.ext }}
# path: target/${{ matrix.target-triple }}/release/bitwarden_rs${{ matrix.ext }}
# repo-token: ${{ secrets.GITHUB_TOKEN }}
# End Upload to github actions release

34
.github/workflows/hadolint.yml vendored Normal file
View File

@@ -0,0 +1,34 @@
name: Hadolint
on:
pull_request:
# Ignore when there are only changes done too one of these paths
paths:
- "docker/**"
jobs:
hadolint:
name: Validate Dockerfile syntax
runs-on: ubuntu-20.04
steps:
# Checkout the repo
- name: Checkout
uses: actions/checkout@v2
# End Checkout the repo
# Download hadolint
- name: Download hadolint
shell: bash
run: |
sudo curl -L https://github.com/hadolint/hadolint/releases/download/v$HADOLINT_VERSION/hadolint-$(uname -s)-$(uname -m) -o /usr/local/bin/hadolint && \
sudo chmod +x /usr/local/bin/hadolint
env:
HADOLINT_VERSION: 1.19.0
# End Download hadolint
# Test Dockerfiles
- name: Run hadolint
shell: bash
run: git ls-files --exclude='docker/*/Dockerfile*' --ignored | xargs hadolint
# End Test Dockerfiles

View File

@@ -1,148 +0,0 @@
name: Workflow
on:
push:
paths-ignore:
- "**.md"
#pull_request:
# paths-ignore:
# - "**.md"
jobs:
build:
name: Build
strategy:
fail-fast: false
matrix:
db-backend: [sqlite, mysql, postgresql]
target:
- x86_64-unknown-linux-gnu
# - x86_64-unknown-linux-musl
# - x86_64-apple-darwin
# - x86_64-pc-windows-msvc
include:
- target: x86_64-unknown-linux-gnu
os: ubuntu-latest
ext:
# - target: x86_64-unknown-linux-musl
# os: ubuntu-latest
# ext:
# - target: x86_64-apple-darwin
# os: macOS-latest
# ext:
# - target: x86_64-pc-windows-msvc
# os: windows-latest
# ext: .exe
runs-on: ${{ matrix.os }}
steps:
- uses: actions/checkout@v1
# - name: Cache choco cache
# uses: actions/cache@v1.0.3
# if: matrix.os == 'windows-latest'
# with:
# path: ~\AppData\Local\Temp\chocolatey
# key: ${{ runner.os }}-choco-cache-${{ matrix.db-backend }}
- name: Cache vcpkg installed
uses: actions/cache@v1.0.3
if: matrix.os == 'windows-latest'
with:
path: $VCPKG_ROOT/installed
key: ${{ runner.os }}-vcpkg-cache-${{ matrix.db-backend }}
env:
VCPKG_ROOT: 'C:\vcpkg'
- name: Cache vcpkg downloads
uses: actions/cache@v1.0.3
if: matrix.os == 'windows-latest'
with:
path: $VCPKG_ROOT/downloads
key: ${{ runner.os }}-vcpkg-cache-${{ matrix.db-backend }}
env:
VCPKG_ROOT: 'C:\vcpkg'
# - name: Cache homebrew
# uses: actions/cache@v1.0.3
# if: matrix.os == 'macOS-latest'
# with:
# path: ~/Library/Caches/Homebrew
# key: ${{ runner.os }}-brew-cache
# - name: Cache apt
# uses: actions/cache@v1.0.3
# if: matrix.os == 'ubuntu-latest'
# with:
# path: /var/cache/apt/archives
# key: ${{ runner.os }}-apt-cache
# Install dependencies
- name: Install dependencies macOS
run: brew update; brew install openssl sqlite libpq mysql
if: matrix.os == 'macOS-latest'
- name: Install dependencies Ubuntu
run: sudo apt-get update && sudo apt-get install --no-install-recommends openssl sqlite libpq-dev libmysql++-dev
if: matrix.os == 'ubuntu-latest'
- name: Install dependencies Windows
run: vcpkg integrate install; vcpkg install sqlite3:x64-windows openssl:x64-windows libpq:x64-windows libmysql:x64-windows
if: matrix.os == 'windows-latest'
env:
VCPKG_ROOT: 'C:\vcpkg'
# End Install dependencies
# Install rust nightly toolchain
- name: Cache cargo registry
uses: actions/cache@v1.0.3
with:
path: ~/.cargo/registry
key: ${{ runner.os }}-${{matrix.db-backend}}-cargo-registry-${{ hashFiles('**/Cargo.lock') }}
- name: Cache cargo index
uses: actions/cache@v1.0.3
with:
path: ~/.cargo/git
key: ${{ runner.os }}-${{matrix.db-backend}}-cargo-index-${{ hashFiles('**/Cargo.lock') }}
- name: Cache cargo build
uses: actions/cache@v1.0.3
with:
path: target
key: ${{ runner.os }}-${{matrix.db-backend}}-cargo-build-target-${{ hashFiles('**/Cargo.lock') }}
- name: Install latest nightly
uses: actions-rs/toolchain@v1.0.5
with:
# Uses rust-toolchain to determine version
profile: minimal
target: ${{ matrix.target }}
# Build
- name: Build Win
if: matrix.os == 'windows-latest'
run: cargo.exe build --features ${{ matrix.db-backend }} --release --target ${{ matrix.target }}
env:
RUSTFLAGS: -Ctarget-feature=+crt-static
VCPKG_ROOT: 'C:\vcpkg'
- name: Build macOS / Ubuntu
if: matrix.os == 'macOS-latest' || matrix.os == 'ubuntu-latest'
run: cargo build --verbose --features ${{ matrix.db-backend }} --release --target ${{ matrix.target }}
# Test
- name: Run tests
run: cargo test --features ${{ matrix.db-backend }}
# Upload & Release
- name: Upload artifact
uses: actions/upload-artifact@v1.0.0
with:
name: bitwarden_rs-${{ matrix.db-backend }}-${{ matrix.target }}${{ matrix.ext }}
path: target/${{ matrix.target }}/release/bitwarden_rs${{ matrix.ext }}
- name: Release
uses: Shopify/upload-to-release@1.0.0
if: startsWith(github.ref, 'refs/tags/')
with:
name: bitwarden_rs-${{ matrix.db-backend }}-${{ matrix.target }}${{ matrix.ext }}
path: target/${{ matrix.target }}/release/bitwarden_rs${{ matrix.ext }}
repo-token: ${{ secrets.GITHUB_TOKEN }}

View File

@@ -1,21 +0,0 @@
dist: xenial
env:
global:
- HADOLINT_VERSION=1.17.1
language: rust
rust: nightly
cache: cargo
before_install:
- sudo curl -L https://github.com/hadolint/hadolint/releases/download/v$HADOLINT_VERSION/hadolint-$(uname -s)-$(uname -m) -o /usr/local/bin/hadolint
- sudo chmod +rx /usr/local/bin/hadolint
- rustup set profile minimal
# Nothing to install
install: true
script:
- git ls-files --exclude='Dockerfile*' --ignored | xargs --max-lines=1 hadolint
- cargo test --features "sqlite"
- cargo test --features "mysql"

1962
Cargo.lock generated

File diff suppressed because it is too large

View File

@@ -16,6 +16,12 @@ enable_syslog = []
mysql = ["diesel/mysql", "diesel_migrations/mysql"] mysql = ["diesel/mysql", "diesel_migrations/mysql"]
postgresql = ["diesel/postgres", "diesel_migrations/postgres"] postgresql = ["diesel/postgres", "diesel_migrations/postgres"]
sqlite = ["diesel/sqlite", "diesel_migrations/sqlite", "libsqlite3-sys"] sqlite = ["diesel/sqlite", "diesel_migrations/sqlite", "libsqlite3-sys"]
# Enable to use a vendored and statically linked openssl
vendored_openssl = ["openssl/vendored"]
# Enable unstable features, requires nightly
# Currently only used to enable rusts official ip support
unstable = []
[target."cfg(not(windows))".dependencies] [target."cfg(not(windows))".dependencies]
syslog = "4.0.1" syslog = "4.0.1"
@@ -26,108 +32,106 @@ rocket = { version = "0.5.0-dev", features = ["tls"], default-features = false }
rocket_contrib = "0.5.0-dev" rocket_contrib = "0.5.0-dev"
# HTTP client # HTTP client
reqwest = { version = "0.10.4", features = ["blocking", "json"] } reqwest = { version = "0.11.2", features = ["blocking", "json"] }
# multipart/form-data support # multipart/form-data support
multipart = { version = "0.16.1", features = ["server"], default-features = false } multipart = { version = "0.17.1", features = ["server"], default-features = false }
# WebSockets library # WebSockets library
ws = "0.9.1" ws = { version = "0.10.0", package = "parity-ws" }
# MessagePack library # MessagePack library
rmpv = "0.4.4" rmpv = "0.4.7"
# Concurrent hashmap implementation # Concurrent hashmap implementation
chashmap = "2.2.2" chashmap = "2.2.2"
# A generic serialization/deserialization framework # A generic serialization/deserialization framework
serde = "1.0.104" serde = { version = "1.0.125", features = ["derive"] }
serde_derive = "1.0.104" serde_json = "1.0.64"
serde_json = "1.0.48"
# Logging # Logging
log = "0.4.8" log = "0.4.14"
fern = { version = "0.6.0", features = ["syslog-4"] } fern = { version = "0.6.0", features = ["syslog-4"] }
# A safe, extensible ORM and Query builder # A safe, extensible ORM and Query builder
diesel = { version = "1.4.3", features = [ "chrono", "r2d2"] } diesel = { version = "1.4.6", features = [ "chrono", "r2d2"] }
diesel_migrations = "1.4.0" diesel_migrations = "1.4.0"
# Bundled SQLite # Bundled SQLite
libsqlite3-sys = { version = "0.16.0", features = ["bundled"], optional = true } libsqlite3-sys = { version = "0.20.1", features = ["bundled"], optional = true }
# Crypto library # Crypto-related libraries
ring = "0.16.11" rand = "0.8.3"
ring = "0.16.20"
# UUID generation # UUID generation
uuid = { version = "0.8.1", features = ["v4"] } uuid = { version = "0.8.2", features = ["v4"] }
# Date and time librar for Rust # Date and time libraries
chrono = "0.4.11" chrono = { version = "0.4.19", features = ["serde"] }
time = "0.2.9" chrono-tz = "0.5.3"
time = "0.2.26"
# TOTP library # TOTP library
oath = "0.10.2" oath = "0.10.2"
# Data encoding library # Data encoding library
data-encoding = "2.2.0" data-encoding = "2.3.2"
# JWT library # JWT library
jsonwebtoken = "7.1.0" jsonwebtoken = "7.2.0"
# U2F library # U2F library
u2f = "0.2.0" u2f = "0.2.0"
# Yubico Library # Yubico Library
yubico = { version = "0.9.0", features = ["online-tokio"], default-features = false } yubico = { version = "0.10.0", features = ["online-tokio"], default-features = false }
# A `dotenv` implementation for Rust # A `dotenv` implementation for Rust
dotenv = { version = "0.15.0", default-features = false } dotenv = { version = "0.15.0", default-features = false }
# Lazy initialization # Lazy initialization
once_cell = "1.3.1" once_cell = "1.7.2"
# More derives
derive_more = "0.99.3"
# Numerical libraries # Numerical libraries
num-traits = "0.2.11" num-traits = "0.2.14"
num-derive = "0.3.0" num-derive = "0.3.3"
# Email libraries # Email libraries
lettre = "0.10.0-pre" lettre = { version = "0.10.0-beta.3", features = ["smtp-transport", "builder", "serde", "native-tls", "hostname", "tracing"], default-features = false }
native-tls = "0.2.4" newline-converter = "0.2.0"
quoted_printable = "0.4.2"
# Template library # Template library
handlebars = { version = "3.0.1", features = ["dir_source"] } handlebars = { version = "3.5.3", features = ["dir_source"] }
# For favicon extraction from main website # For favicon extraction from main website
soup = "0.5.0" html5ever = "0.25.1"
regex = "1.3.4" markup5ever_rcdom = "0.1.0"
regex = { version = "1.4.5", features = ["std", "perf"], default-features = false }
data-url = "0.1.0" data-url = "0.1.0"
# Used by U2F, JWT and Postgres # Used by U2F, JWT and Postgres
openssl = "0.10.28" openssl = "0.10.33"
# URL encoding library # URL encoding library
percent-encoding = "2.1.0" percent-encoding = "2.1.0"
# Punycode conversion # Punycode conversion
idna = "0.2.0" idna = "0.2.2"
# CLI argument parsing # CLI argument parsing
structopt = "0.3.11" pico-args = "0.4.0"
# Logging panics to logfile instead stderr only # Logging panics to logfile instead stderr only
backtrace = "0.3.45" backtrace = "0.3.56"
# Macro ident concatenation
paste = "1.0.5"
[patch.crates-io] [patch.crates-io]
# Use newest ring # Use newest ring
rocket = { git = 'https://github.com/SergioBenitez/Rocket', rev = 'dfc9e9aab01d349da32c52db393e35b7fffea63c' } rocket = { git = 'https://github.com/SergioBenitez/Rocket', rev = '263e39b5b429de1913ce7e3036575a7b4d88b6d7' }
rocket_contrib = { git = 'https://github.com/SergioBenitez/Rocket', rev = 'dfc9e9aab01d349da32c52db393e35b7fffea63c' } rocket_contrib = { git = 'https://github.com/SergioBenitez/Rocket', rev = '263e39b5b429de1913ce7e3036575a7b4d88b6d7' }
# Use git version for timeout fix #706
lettre = { git = 'https://github.com/lettre/lettre', rev = '245c600c82ee18b766e8729f005ff453a55dce34' }
# For favicon extraction from main website # For favicon extraction from main website
data-url = { git = 'https://github.com/servo/rust-url', package="data-url", rev = '7f1bd6ce1c2fde599a757302a843a60e714c5f72' } data-url = { git = 'https://github.com/servo/rust-url', package="data-url", rev = '540ede02d0771824c0c80ff9f57fe8eff38b1291' }

View File

@@ -1 +1 @@
docker/amd64/sqlite/Dockerfile docker/amd64/Dockerfile

View File

@@ -21,7 +21,6 @@ Image is based on [Rust implementation of Bitwarden API](https://github.com/dani
Basically full implementation of Bitwarden API is provided including:
* Single user functionality
* Organizations support
* Attachments
* Vault API support
@@ -59,3 +58,4 @@ If you prefer to chat, we're usually hanging around at [#bitwarden_rs:matrix.org
Thanks for your contribution to the project!
- [@ChonoN](https://github.com/ChonoN)
- [@themightychris](https://github.com/themightychris)

View File

@@ -1,5 +1,5 @@
pool: pool:
vmImage: 'Ubuntu-16.04' vmImage: 'Ubuntu-18.04'
steps: steps:
- script: | - script: |
@@ -10,16 +10,13 @@ steps:
- script: | - script: |
sudo apt-get update sudo apt-get update
sudo apt-get install -y libmysql++-dev sudo apt-get install -y --no-install-recommends build-essential libmariadb-dev-compat libpq-dev libssl-dev pkgconf
displayName: Install libmysql displayName: 'Install build libraries.'
- script: | - script: |
rustc -Vv rustc -Vv
cargo -V cargo -V
displayName: Query rust and cargo versions displayName: Query rust and cargo versions
- script : cargo test --features "sqlite" - script : cargo test --features "sqlite,mysql,postgresql"
displayName: 'Test project with sqlite backend' displayName: 'Test project with sqlite, mysql and postgresql backends'
- script : cargo test --features "mysql"
displayName: 'Test project with mysql backend'

View File

@@ -1,17 +1,24 @@
use std::process::Command; use std::process::Command;
use std::env;
fn main() { fn main() {
#[cfg(all(feature = "sqlite", feature = "mysql"))] // This allow using #[cfg(sqlite)] instead of #[cfg(feature = "sqlite")], which helps when trying to add them through macros
compile_error!("Can't enable both sqlite and mysql at the same time"); #[cfg(feature = "sqlite")]
#[cfg(all(feature = "sqlite", feature = "postgresql"))] println!("cargo:rustc-cfg=sqlite");
compile_error!("Can't enable both sqlite and postgresql at the same time"); #[cfg(feature = "mysql")]
#[cfg(all(feature = "mysql", feature = "postgresql"))] println!("cargo:rustc-cfg=mysql");
compile_error!("Can't enable both mysql and postgresql at the same time"); #[cfg(feature = "postgresql")]
println!("cargo:rustc-cfg=postgresql");
#[cfg(not(any(feature = "sqlite", feature = "mysql", feature = "postgresql")))] #[cfg(not(any(feature = "sqlite", feature = "mysql", feature = "postgresql")))]
compile_error!("You need to enable one DB backend. To build with previous defaults do: cargo build --features sqlite"); compile_error!("You need to enable one DB backend. To build with previous defaults do: cargo build --features sqlite");
read_git_info().ok(); if let Ok(version) = env::var("BWRS_VERSION") {
println!("cargo:rustc-env=BWRS_VERSION={}", version);
println!("cargo:rustc-env=CARGO_PKG_VERSION={}", version);
} else {
read_git_info().ok();
}
} }
fn run(args: &[&str]) -> Result<String, std::io::Error> { fn run(args: &[&str]) -> Result<String, std::io::Error> {
@@ -54,14 +61,16 @@ fn read_git_info() -> Result<(), std::io::Error> {
} else { } else {
format!("{}-{}", last_tag, rev_short) format!("{}-{}", last_tag, rev_short)
}; };
println!("cargo:rustc-env=GIT_VERSION={}", version);
println!("cargo:rustc-env=BWRS_VERSION={}", version);
println!("cargo:rustc-env=CARGO_PKG_VERSION={}", version);
// To access these values, use: // To access these values, use:
// env!("GIT_EXACT_TAG") // env!("GIT_EXACT_TAG")
// env!("GIT_LAST_TAG") // env!("GIT_LAST_TAG")
// env!("GIT_BRANCH") // env!("GIT_BRANCH")
// env!("GIT_REV") // env!("GIT_REV")
// env!("GIT_VERSION") // env!("BWRS_VERSION")
Ok(()) Ok(())
} }

33
docker/Dockerfile.buildx Normal file
View File

@@ -0,0 +1,33 @@
# The cross-built images have the build arch (`amd64`) embedded in the image
# manifest, rather than the target arch. For example:
#
# $ docker inspect bitwardenrs/server:latest-armv7 | jq -r '.[]|.Architecture'
# amd64
#
# Recent versions of Docker have started printing a warning when the image's
# claimed arch doesn't match the host arch. For example:
#
# WARNING: The requested image's platform (linux/amd64) does not match the
# detected host platform (linux/arm/v7) and no specific platform was requested
#
# The image still works fine, but the spurious warning creates confusion.
#
# Docker doesn't seem to provide a way to directly set the arch of an image
# at build time. To resolve the build vs. target arch discrepancy, we use
# Docker Buildx to build a new set of images with the correct target arch.
#
# Docker Buildx uses this Dockerfile to build an image for each requested
# platform. Since the Dockerfile basically consists of a single `FROM`
# instruction, we're effectively telling Buildx to build a platform-specific
# image by simply copying the existing cross-built image and setting the
# correct target arch as a side effect.
#
# References:
#
# - https://docs.docker.com/buildx/working-with-buildx/#build-multi-platform-images
# - https://docs.docker.com/engine/reference/builder/#automatic-platform-args-in-the-global-scope
# - https://docs.docker.com/engine/reference/builder/#understand-how-arg-and-from-interact
#
ARG LOCAL_REPO
ARG DOCKER_TAG
FROM ${LOCAL_REPO}:${DOCKER_TAG}-${TARGETARCH}${TARGETVARIANT}

View File

@@ -1,68 +1,89 @@
# This file was generated using a Jinja2 template. # This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfile's. # Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.
{% set build_stage_base_image = "rust:1.40" %} {% set build_stage_base_image = "rust:1.50" %}
{% if "alpine" in target_file %} {% if "alpine" in target_file %}
{% set build_stage_base_image = "clux/muslrust:nightly-2020-03-09" %} {% if "amd64" in target_file %}
{% set runtime_stage_base_image = "alpine:3.11" %} {% set build_stage_base_image = "clux/muslrust:nightly-2021-02-22" %}
{% set package_arch_name = "" %} {% set runtime_stage_base_image = "alpine:3.13" %}
{% set package_arch_target = "x86_64-unknown-linux-musl" %}
{% elif "armv7" in target_file %}
{% set build_stage_base_image = "messense/rust-musl-cross:armv7-musleabihf" %}
{% set runtime_stage_base_image = "balenalib/armv7hf-alpine:3.13" %}
{% set package_arch_target = "armv7-unknown-linux-musleabihf" %}
{% endif %}
{% elif "amd64" in target_file %} {% elif "amd64" in target_file %}
{% set runtime_stage_base_image = "debian:buster-slim" %} {% set runtime_stage_base_image = "debian:buster-slim" %}
{% set package_arch_name = "" %} {% elif "arm64" in target_file %}
{% elif "aarch64" in target_file %}
{% set runtime_stage_base_image = "balenalib/aarch64-debian:buster" %} {% set runtime_stage_base_image = "balenalib/aarch64-debian:buster" %}
{% set package_arch_name = "arm64" %} {% set package_arch_name = "arm64" %}
{% set package_arch_target = "aarch64-unknown-linux-gnu" %}
{% set package_cross_compiler = "aarch64-linux-gnu" %}
{% elif "armv6" in target_file %} {% elif "armv6" in target_file %}
{% set runtime_stage_base_image = "balenalib/rpi-debian:buster" %} {% set runtime_stage_base_image = "balenalib/rpi-debian:buster" %}
{% set package_arch_name = "armel" %} {% set package_arch_name = "armel" %}
{% set package_arch_target = "arm-unknown-linux-gnueabi" %}
{% set package_cross_compiler = "arm-linux-gnueabi" %}
{% elif "armv7" in target_file %} {% elif "armv7" in target_file %}
{% set runtime_stage_base_image = "balenalib/armv7hf-debian:buster" %} {% set runtime_stage_base_image = "balenalib/armv7hf-debian:buster" %}
{% set package_arch_name = "armhf" %} {% set package_arch_name = "armhf" %}
{% set package_arch_target = "armv7-unknown-linux-gnueabihf" %}
{% set package_cross_compiler = "arm-linux-gnueabihf" %}
{% endif %} {% endif %}
{% set package_arch_prefix = ":" + package_arch_name %} {% if package_arch_name is defined %}
{% if package_arch_name == "" %} {% set package_arch_prefix = ":" + package_arch_name %}
{% else %}
{% set package_arch_prefix = "" %} {% set package_arch_prefix = "" %}
{% endif %} {% endif %}
{% if package_arch_target is defined %}
{% set package_arch_target_param = " --target=" + package_arch_target %}
{% else %}
{% set package_arch_target_param = "" %}
{% endif %}
# Using multistage build: # Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/ # https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/ # https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE ####################### ####################### VAULT BUILD IMAGE #######################
{% set vault_image_hash = "sha256:feb3f46d15738191b9043be4cdb1be2c0078ed411e7b7be73a2f4fcbca01e13c" %} {% set vault_version = "2.19.0" %}
{% raw %} {% set vault_image_digest = "sha256:8747cfaa2c6d87d1749e119dd884697e8099389aa9aca30a4d73d4ff796fe0e4" %}
# This hash is extracted from the docker web-vault builds and it's prefered over a simple tag because it's immutable. # The web-vault digest specifies a particular web-vault build on Docker Hub.
# It can be viewed in multiple ways: # Using the digest instead of the tag name provides better security,
# - From the https://hub.docker.com/repository/docker/bitwardenrs/web-vault/tags page, click the tag name and the digest should be there. # as the digest of an image is immutable, whereas a tag name can later
# - From the console, with the following commands: # be changed to point to a malicious image.
# docker pull bitwardenrs/web-vault:v2.12.0e
# docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.12.0e
# #
# - To do the opposite, and get the tag from the hash, you can do: # To verify the current digest for a given tag name:
# docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:feb3f46d15738191b9043be4cdb1be2c0078ed411e7b7be73a2f4fcbca01e13c # - From https://hub.docker.com/r/bitwardenrs/web-vault/tags,
{% endraw %} # click the tag name to view the digest of the image it currently points to.
FROM bitwardenrs/web-vault@{{ vault_image_hash }} as vault # - From the command line:
# $ docker pull bitwardenrs/web-vault:v{{ vault_version }}
# $ docker image inspect --format "{{ '{{' }}.RepoDigests}}" bitwardenrs/web-vault:v{{ vault_version }}
# [bitwardenrs/web-vault@{{ vault_image_digest }}]
#
# - Conversely, to get the tag name from the digest:
# $ docker image inspect --format "{{ '{{' }}.RepoTags}}" bitwardenrs/web-vault@{{ vault_image_digest }}
# [bitwardenrs/web-vault:v{{ vault_version }}]
#
FROM bitwardenrs/web-vault@{{ vault_image_digest }} as vault
########################## BUILD IMAGE ########################## ########################## BUILD IMAGE ##########################
{% if "musl" in build_stage_base_image %}
# Musl build image for statically compiled binary
{% else %}
# We need to use the Rust build image, because
# we need the Rust compiler and Cargo tooling
{% endif %}
FROM {{ build_stage_base_image }} as build FROM {{ build_stage_base_image }} as build
{% if "sqlite" in target_file %} {% if "alpine" in target_file %}
# set sqlite as default for DB ARG for backward compatibility {% if "amd64" in target_file %}
# Alpine-based AMD64 (musl) does not support mysql/mariadb during compile time.
ARG DB=sqlite,postgresql
{% set features = "sqlite,postgresql" %}
{% else %}
# Alpine-based ARM (musl) only supports sqlite during compile time.
ARG DB=sqlite ARG DB=sqlite
{% set features = "sqlite" %}
{% elif "mysql" in target_file %} {% endif %}
# set mysql backend {% else %}
ARG DB=mysql # Debian-based builds support multidb
ARG DB=sqlite,mysql,postgresql
{% elif "postgresql" in target_file %} {% set features = "sqlite,mysql,postgresql" %}
# set postgresql backend
ARG DB=postgresql
{% endif %} {% endif %}
# Build time options to avoid dpkg warnings and help with reproducible builds. # Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive LANG=C.UTF-8 TZ=UTC TERM=xterm-256color ENV DEBIAN_FRONTEND=noninteractive LANG=C.UTF-8 TZ=UTC TERM=xterm-256color
@@ -72,9 +93,12 @@ RUN rustup set profile minimal
{% if "alpine" in target_file %} {% if "alpine" in target_file %}
ENV USER "root" ENV USER "root"
ENV RUSTFLAGS='-C link-arg=-s' ENV RUSTFLAGS='-C link-arg=-s'
{% if "armv7" in target_file %}
{% elif "aarch64" in target_file or "armv" in target_file %} ENV CFLAGS_armv7_unknown_linux_musleabihf="-mfpu=vfpv3-d16"
{% endif %}
{% elif "arm" in target_file %}
# Install required build libs for {{ package_arch_name }} architecture. # Install required build libs for {{ package_arch_name }} architecture.
# To compile both mysql and postgresql we need some extra packages for both host arch and target arch
RUN sed 's/^deb/deb-src/' /etc/apt/sources.list > \ RUN sed 's/^deb/deb-src/' /etc/apt/sources.list > \
/etc/apt/sources.list.d/deb-src.list \ /etc/apt/sources.list.d/deb-src.list \
&& dpkg --add-architecture {{ package_arch_name }} \ && dpkg --add-architecture {{ package_arch_name }} \
@@ -82,77 +106,34 @@ RUN sed 's/^deb/deb-src/' /etc/apt/sources.list > \
&& apt-get install -y \ && apt-get install -y \
--no-install-recommends \ --no-install-recommends \
libssl-dev{{ package_arch_prefix }} \ libssl-dev{{ package_arch_prefix }} \
libc6-dev{{ package_arch_prefix }} libc6-dev{{ package_arch_prefix }} \
libpq5{{ package_arch_prefix }} \
libpq-dev \
libmariadb-dev{{ package_arch_prefix }} \
libmariadb-dev-compat{{ package_arch_prefix }}
RUN apt-get update \
&& apt-get install -y \
--no-install-recommends \
gcc-{{ package_cross_compiler }} \
&& mkdir -p ~/.cargo \
&& echo '[target.{{ package_arch_target }}]' >> ~/.cargo/config \
&& echo 'linker = "{{ package_cross_compiler }}-gcc"' >> ~/.cargo/config \
&& echo 'rustflags = ["-L/usr/lib/{{ package_cross_compiler }}"]' >> ~/.cargo/config
ENV CARGO_HOME "/root/.cargo"
ENV USER "root"
{% endif -%} {% endif -%}
{% if "aarch64" in target_file %}
RUN apt-get update \
&& apt-get install -y \
--no-install-recommends \
gcc-aarch64-linux-gnu \
&& mkdir -p ~/.cargo \
&& echo '[target.aarch64-unknown-linux-gnu]' >> ~/.cargo/config \
&& echo 'linker = "aarch64-linux-gnu-gcc"' >> ~/.cargo/config
ENV CARGO_HOME "/root/.cargo" {% if "amd64" in target_file and "alpine" not in target_file %}
ENV USER "root" # Install DB packages
{% elif "armv6" in target_file %}
RUN apt-get update \
&& apt-get install -y \
--no-install-recommends \
gcc-arm-linux-gnueabi \
&& mkdir -p ~/.cargo \
&& echo '[target.arm-unknown-linux-gnueabi]' >> ~/.cargo/config \
&& echo 'linker = "arm-linux-gnueabi-gcc"' >> ~/.cargo/config
ENV CARGO_HOME "/root/.cargo"
ENV USER "root"
{% elif "armv6" in target_file %}
RUN apt-get update \
&& apt-get install -y \
--no-install-recommends \
gcc-arm-linux-gnueabihf \
&& mkdir -p ~/.cargo \
&& echo '[target.armv7-unknown-linux-gnueabihf]' >> ~/.cargo/config \
&& echo 'linker = "arm-linux-gnueabihf-gcc"' >> ~/.cargo/config
ENV CARGO_HOME "/root/.cargo"
ENV USER "root"
{% elif "armv7" in target_file %}
RUN apt-get update \
&& apt-get install -y \
--no-install-recommends \
gcc-arm-linux-gnueabihf \
&& mkdir -p ~/.cargo \
&& echo '[target.armv7-unknown-linux-gnueabihf]' >> ~/.cargo/config \
&& echo 'linker = "arm-linux-gnueabihf-gcc"' >> ~/.cargo/config
ENV CARGO_HOME "/root/.cargo"
ENV USER "root"
{% endif %}
{% if "mysql" in target_file %}
# Install MySQL package
RUN apt-get update && apt-get install -y \ RUN apt-get update && apt-get install -y \
--no-install-recommends \ --no-install-recommends \
{% if "musl" in build_stage_base_image %}
libmysqlclient-dev{{ package_arch_prefix }} \
{% else %}
libmariadb-dev{{ package_arch_prefix }} \ libmariadb-dev{{ package_arch_prefix }} \
{% endif %}
&& rm -rf /var/lib/apt/lists/*
{% elif "postgresql" in target_file %}
# Install PostgreSQL package
RUN apt-get update && apt-get install -y \
--no-install-recommends \
libpq-dev{{ package_arch_prefix }} \ libpq-dev{{ package_arch_prefix }} \
&& rm -rf /var/lib/apt/lists/* && rm -rf /var/lib/apt/lists/*
{% endif %} {% endif %}
# Creates a dummy project used to grab dependencies # Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app RUN USER=root cargo new --bin /app
WORKDIR /app WORKDIR /app
@@ -162,39 +143,38 @@ COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs COPY ./build.rs ./build.rs
{% if "aarch64" in target_file %} {% if "alpine" not in target_file %}
ENV CC_aarch64_unknown_linux_gnu="/usr/bin/aarch64-linux-gnu-gcc" {% if "arm" in target_file %}
# NOTE: This should be the last apt-get/dpkg for this stage, since after this it will fail because of broken dependencies.
# For Diesel-RS migrations_macros to compile with MySQL/MariaDB we need to do some magic.
# We at least need libmariadb3:amd64 installed for the x86_64 version of libmariadb.so (client)
# We also need the libmariadb-dev-compat:amd64 but it can not be installed together with the {{ package_arch_prefix }} version.
# What we can do is a force install, because nothing important is overlapping each other.
RUN apt-get install -y --no-install-recommends libmariadb3:amd64 && \
apt-get download libmariadb-dev-compat:amd64 && \
dpkg --force-all -i ./libmariadb-dev-compat*.deb && \
rm -rvf ./libmariadb-dev-compat*.deb
# For Diesel-RS migrations_macros to compile with PostgreSQL we need to do some magic.
# The libpq5{{ package_arch_prefix }} package seems to not provide a symlink to libpq.so.5 with the name libpq.so.
# This is only provided by the libpq-dev package which can't be installed for both arch at the same time.
# Without this specific file the ld command will fail and compilation fails with it.
RUN ln -sfnr /usr/lib/{{ package_cross_compiler }}/libpq.so.5 /usr/lib/{{ package_cross_compiler }}/libpq.so
ENV CC_{{ package_arch_target | replace("-", "_") }}="/usr/bin/{{ package_cross_compiler }}-gcc"
ENV CROSS_COMPILE="1" ENV CROSS_COMPILE="1"
ENV OPENSSL_INCLUDE_DIR="/usr/include/aarch64-linux-gnu" ENV OPENSSL_INCLUDE_DIR="/usr/include/{{ package_cross_compiler }}"
ENV OPENSSL_LIB_DIR="/usr/lib/aarch64-linux-gnu" ENV OPENSSL_LIB_DIR="/usr/lib/{{ package_cross_compiler }}"
{% elif "armv6" in target_file %} {% endif -%}
ENV CC_arm_unknown_linux_gnueabi="/usr/bin/arm-linux-gnueabi-gcc"
ENV CROSS_COMPILE="1"
ENV OPENSSL_INCLUDE_DIR="/usr/include/arm-linux-gnueabi"
ENV OPENSSL_LIB_DIR="/usr/lib/arm-linux-gnueabi"
{% elif "armv7" in target_file %}
ENV CC_armv7_unknown_linux_gnueabihf="/usr/bin/arm-linux-gnueabihf-gcc"
ENV CROSS_COMPILE="1"
ENV OPENSSL_INCLUDE_DIR="/usr/include/arm-linux-gnueabihf"
ENV OPENSSL_LIB_DIR="/usr/lib/arm-linux-gnueabihf"
{% endif -%}
{% if "alpine" in target_file %}
RUN rustup target add x86_64-unknown-linux-musl
{% elif "aarch64" in target_file %}
RUN rustup target add aarch64-unknown-linux-gnu
{% elif "armv6" in target_file %}
RUN rustup target add arm-unknown-linux-gnueabi
{% elif "armv7" in target_file %}
RUN rustup target add armv7-unknown-linux-gnueabihf
{% endif %}
{% if package_arch_target is defined %}
RUN rustup target add {{ package_arch_target }}
{% endif %}
# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN cargo build --features ${DB} --release RUN cargo build --features ${DB} --release{{ package_arch_target_param }}
RUN find . -not -path "./target*" -delete
# Copies the complete project
@@ -206,14 +186,11 @@ RUN touch src/main.rs
# Builds again, this time it'll just be
# your actual source files being built
{% if "amd64" in target_file %} RUN cargo build --features ${DB} --release{{ package_arch_target_param }}
RUN cargo build --features ${DB} --release {% if "alpine" in target_file %}
{% elif "aarch64" in target_file %} {% if "armv7" in target_file %}
RUN cargo build --features ${DB} --release --target=aarch64-unknown-linux-gnu RUN musl-strip target/{{ package_arch_target }}/release/bitwarden_rs
{% elif "armv6" in target_file %} {% endif %}
RUN cargo build --features ${DB} --release --target=arm-unknown-linux-gnueabi
{% elif "armv7" in target_file %}
RUN cargo build --features ${DB} --release --target=armv7-unknown-linux-gnueabihf
{% endif %}
######################## RUNTIME IMAGE ########################
@@ -237,11 +214,14 @@ RUN [ "cross-build-start" ]
RUN apk add --no-cache \
openssl \
curl \
{% if "sqlite" in target_file %} dumb-init \
{% if "sqlite" in features %}
sqlite \
{% elif "mysql" in target_file %} {% endif %}
{% if "mysql" in features %}
mariadb-connector-c \
{% elif "postgresql" in target_file %} {% endif %}
{% if "postgresql" in features %}
postgresql-libs \
{% endif %}
ca-certificates
@@ -251,13 +231,10 @@ RUN apt-get update && apt-get install -y \
openssl \
ca-certificates \
curl \
{% if "sqlite" in target_file %} dumb-init \
sqlite3 \
{% elif "mysql" in target_file %} libmariadb-dev-compat \
libmariadbclient-dev \
{% elif "postgresql" in target_file %}
libpq5 \
{% endif %}
&& rm -rf /var/lib/apt/lists/*
{% endif %}
@@ -275,22 +252,18 @@ EXPOSE 3012
# and the binary from the "build" stage to the current stage
COPY Rocket.toml .
COPY --from=vault /web-vault ./web-vault
{% if "alpine" in target_file %} {% if package_arch_target is defined %}
COPY --from=build /app/target/x86_64-unknown-linux-musl/release/bitwarden_rs . COPY --from=build /app/target/{{ package_arch_target }}/release/bitwarden_rs .
{% elif "aarch64" in target_file %}
COPY --from=build /app/target/aarch64-unknown-linux-gnu/release/bitwarden_rs .
{% elif "armv6" in target_file %}
COPY --from=build /app/target/arm-unknown-linux-gnueabi/release/bitwarden_rs .
{% elif "armv7" in target_file %}
COPY --from=build /app/target/armv7-unknown-linux-gnueabihf/release/bitwarden_rs .
{% else %}
COPY --from=build app/target/release/bitwarden_rs . COPY --from=build /app/target/release/bitwarden_rs .
{% endif %}
COPY docker/healthcheck.sh ./healthcheck.sh COPY docker/healthcheck.sh /healthcheck.sh
COPY docker/start.sh /start.sh
HEALTHCHECK --interval=30s --timeout=3s CMD sh healthcheck.sh || exit 1 HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]
# Configures the startup!
WORKDIR /
CMD ["/bitwarden_rs"] ENTRYPOINT ["/usr/bin/dumb-init", "--"]
CMD ["/start.sh"]
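Note on the new startup chain: the template now copies docker/start.sh into the image and launches it through dumb-init, which runs as PID 1 and forwards signals to the server process. The contents of docker/start.sh are not part of this diff, so the following is only a minimal sketch, assuming the script does nothing beyond exec-ing the server binary (the real script may do additional setup):

#!/bin/sh
# Hypothetical sketch of docker/start.sh -- the actual script is not shown in this diff.
# dumb-init (the ENTRYPOINT) is already PID 1, so exec-ing here keeps a single
# foreground process and lets signals reach bitwarden_rs directly.
exec /bitwarden_rs "$@"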


@@ -1,4 +1,4 @@
OBJECTS := $(shell find -mindepth 2 -name 'Dockerfile*') OBJECTS := $(shell find ./ -mindepth 2 -name 'Dockerfile*')
all: $(OBJECTS)

docker/README.md Normal file

@@ -0,0 +1,3 @@
The arch-specific directory names follow the arch identifiers used by the Docker official images:
https://github.com/docker-library/official-images/blob/master/README.md#architectures-other-than-amd64


@@ -1,133 +0,0 @@
# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfile's.
# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
# This hash is extracted from the docker web-vault builds and it's prefered over a simple tag because it's immutable.
# It can be viewed in multiple ways:
# - From the https://hub.docker.com/repository/docker/bitwardenrs/web-vault/tags page, click the tag name and the digest should be there.
# - From the console, with the following commands:
# docker pull bitwardenrs/web-vault:v2.12.0e
# docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.12.0e
#
# - To do the opposite, and get the tag from the hash, you can do:
# docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:feb3f46d15738191b9043be4cdb1be2c0078ed411e7b7be73a2f4fcbca01e13c
FROM bitwardenrs/web-vault@sha256:feb3f46d15738191b9043be4cdb1be2c0078ed411e7b7be73a2f4fcbca01e13c as vault
########################## BUILD IMAGE ##########################
# We need to use the Rust build image, because
# we need the Rust compiler and Cargo tooling
FROM rust:1.40 as build
# set mysql backend
ARG DB=mysql
# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive LANG=C.UTF-8 TZ=UTC TERM=xterm-256color
# Don't download rust docs
RUN rustup set profile minimal
# Install required build libs for arm64 architecture.
RUN sed 's/^deb/deb-src/' /etc/apt/sources.list > \
/etc/apt/sources.list.d/deb-src.list \
&& dpkg --add-architecture arm64 \
&& apt-get update \
&& apt-get install -y \
--no-install-recommends \
libssl-dev:arm64 \
libc6-dev:arm64
RUN apt-get update \
&& apt-get install -y \
--no-install-recommends \
gcc-aarch64-linux-gnu \
&& mkdir -p ~/.cargo \
&& echo '[target.aarch64-unknown-linux-gnu]' >> ~/.cargo/config \
&& echo 'linker = "aarch64-linux-gnu-gcc"' >> ~/.cargo/config
ENV CARGO_HOME "/root/.cargo"
ENV USER "root"
# Install MySQL package
RUN apt-get update && apt-get install -y \
--no-install-recommends \
libmariadb-dev:arm64 \
&& rm -rf /var/lib/apt/lists/*
# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
WORKDIR /app
# Copies over *only* your manifests and build files
COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs
ENV CC_aarch64_unknown_linux_gnu="/usr/bin/aarch64-linux-gnu-gcc"
ENV CROSS_COMPILE="1"
ENV OPENSSL_INCLUDE_DIR="/usr/include/aarch64-linux-gnu"
ENV OPENSSL_LIB_DIR="/usr/lib/aarch64-linux-gnu"
RUN rustup target add aarch64-unknown-linux-gnu
# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN cargo build --features ${DB} --release
RUN find . -not -path "./target*" -delete
# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .
# Make sure that we actually build the project
RUN touch src/main.rs
# Builds again, this time it'll just be
# your actual source files being built
RUN cargo build --features ${DB} --release --target=aarch64-unknown-linux-gnu
######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM balenalib/aarch64-debian:buster
ENV ROCKET_ENV "staging"
ENV ROCKET_PORT=80
ENV ROCKET_WORKERS=10
RUN [ "cross-build-start" ]
# Install needed libraries
RUN apt-get update && apt-get install -y \
--no-install-recommends \
openssl \
ca-certificates \
curl \
libmariadbclient-dev \
&& rm -rf /var/lib/apt/lists/*
RUN mkdir /data
RUN [ "cross-build-end" ]
VOLUME /data
EXPOSE 80
EXPOSE 3012
# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
COPY Rocket.toml .
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/aarch64-unknown-linux-gnu/release/bitwarden_rs .
COPY docker/healthcheck.sh ./healthcheck.sh
HEALTHCHECK --interval=30s --timeout=3s CMD sh healthcheck.sh || exit 1
# Configures the startup!
WORKDIR /
CMD ["/bitwarden_rs"]


@@ -1,29 +1,34 @@
# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfile's. # Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.
# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
# The web-vault digest specifies a particular web-vault build on Docker Hub.
# This hash is extracted from the docker web-vault builds and it's prefered over a simple tag because it's immutable. # Using the digest instead of the tag name provides better security,
# It can be viewed in multiple ways: # as the digest of an image is immutable, whereas a tag name can later
# - From the https://hub.docker.com/repository/docker/bitwardenrs/web-vault/tags page, click the tag name and the digest should be there. # be changed to point to a malicious image.
# - From the console, with the following commands: #
# docker pull bitwardenrs/web-vault:v2.12.0e # To verify the current digest for a given tag name:
# docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.12.0e # - From https://hub.docker.com/r/bitwardenrs/web-vault/tags,
# # click the tag name to view the digest of the image it currently points to.
# - To do the opposite, and get the tag from the hash, you can do: # - From the command line:
# docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:feb3f46d15738191b9043be4cdb1be2c0078ed411e7b7be73a2f4fcbca01e13c # $ docker pull bitwardenrs/web-vault:v2.19.0
FROM bitwardenrs/web-vault@sha256:feb3f46d15738191b9043be4cdb1be2c0078ed411e7b7be73a2f4fcbca01e13c as vault # $ docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.19.0
# [bitwardenrs/web-vault@sha256:8747cfaa2c6d87d1749e119dd884697e8099389aa9aca30a4d73d4ff796fe0e4]
#
# - Conversely, to get the tag name from the digest:
# $ docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:8747cfaa2c6d87d1749e119dd884697e8099389aa9aca30a4d73d4ff796fe0e4
# [bitwardenrs/web-vault:v2.19.0]
#
FROM bitwardenrs/web-vault@sha256:8747cfaa2c6d87d1749e119dd884697e8099389aa9aca30a4d73d4ff796fe0e4 as vault
########################## BUILD IMAGE ##########################
# We need to use the Rust build image, because FROM rust:1.50 as build
# we need the Rust compiler and Cargo tooling
FROM rust:1.40 as build
# set postgresql backend # Debian-based builds support multidb
ARG DB=postgresql ARG DB=sqlite,mysql,postgresql
# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive LANG=C.UTF-8 TZ=UTC TERM=xterm-256color
@@ -31,9 +36,10 @@ ENV DEBIAN_FRONTEND=noninteractive LANG=C.UTF-8 TZ=UTC TERM=xterm-256color
# Don't download rust docs
RUN rustup set profile minimal
# Install PostgreSQL package # Install DB packages
RUN apt-get update && apt-get install -y \
--no-install-recommends \
libmariadb-dev \
libpq-dev \
&& rm -rf /var/lib/apt/lists/*
@@ -46,6 +52,7 @@ COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs
# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
@@ -78,6 +85,9 @@ RUN apt-get update && apt-get install -y \
openssl \
ca-certificates \
curl \
dumb-init \
sqlite3 \
libmariadb-dev-compat \
libpq5 \
&& rm -rf /var/lib/apt/lists/*
@@ -90,12 +100,14 @@ EXPOSE 3012
# and the binary from the "build" stage to the current stage
COPY Rocket.toml .
COPY --from=vault /web-vault ./web-vault
COPY --from=build app/target/release/bitwarden_rs . COPY --from=build /app/target/release/bitwarden_rs .
COPY docker/healthcheck.sh ./healthcheck.sh COPY docker/healthcheck.sh /healthcheck.sh
COPY docker/start.sh /start.sh
HEALTHCHECK --interval=30s --timeout=3s CMD sh healthcheck.sh || exit 1 HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]
# Configures the startup!
WORKDIR /
CMD ["/bitwarden_rs"] ENTRYPOINT ["/usr/bin/dumb-init", "--"]
CMD ["/start.sh"]
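Note on the healthcheck change: the HEALTHCHECK now runs /healthcheck.sh in exec form with a 60s interval and 10s timeout instead of shelling out via `sh healthcheck.sh`. The script itself is not included in this diff; a minimal sketch of what such a probe could look like, assuming curl is installed, the default ROCKET_PORT of 80, and an /alive endpoint on the server:

#!/bin/sh
# Hypothetical healthcheck sketch -- the real docker/healthcheck.sh is not shown here.
# Probe the locally listening server; a failed or timed-out request fails the check.
curl --silent --fail "http://localhost:${ROCKET_PORT:-80}/alive" || exit 1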


@@ -1,28 +1,34 @@
# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfile's. # Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.
# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
# The web-vault digest specifies a particular web-vault build on Docker Hub.
# This hash is extracted from the docker web-vault builds and it's prefered over a simple tag because it's immutable. # Using the digest instead of the tag name provides better security,
# It can be viewed in multiple ways: # as the digest of an image is immutable, whereas a tag name can later
# - From the https://hub.docker.com/repository/docker/bitwardenrs/web-vault/tags page, click the tag name and the digest should be there. # be changed to point to a malicious image.
# - From the console, with the following commands: #
# docker pull bitwardenrs/web-vault:v2.12.0e # To verify the current digest for a given tag name:
# docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.12.0e # - From https://hub.docker.com/r/bitwardenrs/web-vault/tags,
# # click the tag name to view the digest of the image it currently points to.
# - To do the opposite, and get the tag from the hash, you can do: # - From the command line:
# docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:feb3f46d15738191b9043be4cdb1be2c0078ed411e7b7be73a2f4fcbca01e13c # $ docker pull bitwardenrs/web-vault:v2.19.0
FROM bitwardenrs/web-vault@sha256:feb3f46d15738191b9043be4cdb1be2c0078ed411e7b7be73a2f4fcbca01e13c as vault # $ docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.19.0
# [bitwardenrs/web-vault@sha256:8747cfaa2c6d87d1749e119dd884697e8099389aa9aca30a4d73d4ff796fe0e4]
#
# - Conversely, to get the tag name from the digest:
# $ docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:8747cfaa2c6d87d1749e119dd884697e8099389aa9aca30a4d73d4ff796fe0e4
# [bitwardenrs/web-vault:v2.19.0]
#
FROM bitwardenrs/web-vault@sha256:8747cfaa2c6d87d1749e119dd884697e8099389aa9aca30a4d73d4ff796fe0e4 as vault
########################## BUILD IMAGE ##########################
# Musl build image for statically compiled binary FROM clux/muslrust:nightly-2021-02-22 as build
FROM clux/muslrust:nightly-2019-12-19 as build
# set postgresql backend # Alpine-based AMD64 (musl) does not support mysql/mariadb during compile time.
ARG DB=postgresql ARG DB=sqlite,postgresql
# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive LANG=C.UTF-8 TZ=UTC TERM=xterm-256color
@@ -31,12 +37,7 @@ ENV DEBIAN_FRONTEND=noninteractive LANG=C.UTF-8 TZ=UTC TERM=xterm-256color
RUN rustup set profile minimal
ENV USER "root"
ENV RUSTFLAGS='-C link-arg=-s'
# Install PostgreSQL package
RUN apt-get update && apt-get install -y \
--no-install-recommends \
libpq-dev \
&& rm -rf /var/lib/apt/lists/*
# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
@@ -52,7 +53,7 @@ RUN rustup target add x86_64-unknown-linux-musl
# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN cargo build --features ${DB} --release RUN cargo build --features ${DB} --release --target=x86_64-unknown-linux-musl
RUN find . -not -path "./target*" -delete
# Copies the complete project
@@ -64,12 +65,12 @@ RUN touch src/main.rs
# Builds again, this time it'll just be
# your actual source files being built
RUN cargo build --features ${DB} --release RUN cargo build --features ${DB} --release --target=x86_64-unknown-linux-musl
######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM alpine:3.11 FROM alpine:3.13
ENV ROCKET_ENV "staging"
ENV ROCKET_PORT=80
@@ -80,6 +81,8 @@ ENV SSL_CERT_DIR=/etc/ssl/certs
RUN apk add --no-cache \
openssl \
curl \
dumb-init \
sqlite \
postgresql-libs \
ca-certificates
@@ -94,10 +97,12 @@ COPY Rocket.toml .
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/x86_64-unknown-linux-musl/release/bitwarden_rs .
COPY docker/healthcheck.sh ./healthcheck.sh COPY docker/healthcheck.sh /healthcheck.sh
COPY docker/start.sh /start.sh
HEALTHCHECK --interval=30s --timeout=3s CMD sh healthcheck.sh || exit 1 HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]
# Configures the startup!
WORKDIR /
CMD ["/bitwarden_rs"] ENTRYPOINT ["/usr/bin/dumb-init", "--"]
CMD ["/start.sh"]
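Note on backend selection: because the Dockerfiles pass ${DB} straight through to cargo's --features flag (here ARG DB=sqlite,postgresql, since the musl/Alpine AMD64 build cannot compile the mysql backend), the set of database backends baked into an image can be narrowed at build time with --build-arg. A sketch, where the tag name and Dockerfile path are only illustrative placeholders:

# Build the Alpine image with only the sqlite backend compiled in.
# "bitwardenrs:sqlite-musl" is an example tag; docker/amd64/Dockerfile.alpine is
# assumed to be the generated Alpine Dockerfile for this architecture.
docker build -t bitwardenrs:sqlite-musl \
  --build-arg DB=sqlite \
  -f docker/amd64/Dockerfile.alpine .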


@@ -1,101 +0,0 @@
# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfile's.
# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
# This hash is extracted from the docker web-vault builds and it's prefered over a simple tag because it's immutable.
# It can be viewed in multiple ways:
# - From the https://hub.docker.com/repository/docker/bitwardenrs/web-vault/tags page, click the tag name and the digest should be there.
# - From the console, with the following commands:
# docker pull bitwardenrs/web-vault:v2.12.0e
# docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.12.0e
#
# - To do the opposite, and get the tag from the hash, you can do:
# docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:feb3f46d15738191b9043be4cdb1be2c0078ed411e7b7be73a2f4fcbca01e13c
FROM bitwardenrs/web-vault@sha256:feb3f46d15738191b9043be4cdb1be2c0078ed411e7b7be73a2f4fcbca01e13c as vault
########################## BUILD IMAGE ##########################
# We need to use the Rust build image, because
# we need the Rust compiler and Cargo tooling
FROM rust:1.40 as build
# set mysql backend
ARG DB=mysql
# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive LANG=C.UTF-8 TZ=UTC TERM=xterm-256color
# Don't download rust docs
RUN rustup set profile minimal
# Install MySQL package
RUN apt-get update && apt-get install -y \
--no-install-recommends \
libmariadb-dev \
&& rm -rf /var/lib/apt/lists/*
# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
WORKDIR /app
# Copies over *only* your manifests and build files
COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs
# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN cargo build --features ${DB} --release
RUN find . -not -path "./target*" -delete
# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .
# Make sure that we actually build the project
RUN touch src/main.rs
# Builds again, this time it'll just be
# your actual source files being built
RUN cargo build --features ${DB} --release
######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM debian:buster-slim
ENV ROCKET_ENV "staging"
ENV ROCKET_PORT=80
ENV ROCKET_WORKERS=10
# Install needed libraries
RUN apt-get update && apt-get install -y \
--no-install-recommends \
openssl \
ca-certificates \
curl \
libmariadbclient-dev \
&& rm -rf /var/lib/apt/lists/*
RUN mkdir /data
VOLUME /data
EXPOSE 80
EXPOSE 3012
# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
COPY Rocket.toml .
COPY --from=vault /web-vault ./web-vault
COPY --from=build app/target/release/bitwarden_rs .
COPY docker/healthcheck.sh ./healthcheck.sh
HEALTHCHECK --interval=30s --timeout=3s CMD sh healthcheck.sh || exit 1
# Configures the startup!
WORKDIR /
CMD ["/bitwarden_rs"]


@@ -1,103 +0,0 @@
# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfile's.
# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
# This hash is extracted from the docker web-vault builds and it's prefered over a simple tag because it's immutable.
# It can be viewed in multiple ways:
# - From the https://hub.docker.com/repository/docker/bitwardenrs/web-vault/tags page, click the tag name and the digest should be there.
# - From the console, with the following commands:
# docker pull bitwardenrs/web-vault:v2.12.0e
# docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.12.0e
#
# - To do the opposite, and get the tag from the hash, you can do:
# docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:feb3f46d15738191b9043be4cdb1be2c0078ed411e7b7be73a2f4fcbca01e13c
FROM bitwardenrs/web-vault@sha256:feb3f46d15738191b9043be4cdb1be2c0078ed411e7b7be73a2f4fcbca01e13c as vault
########################## BUILD IMAGE ##########################
# Musl build image for statically compiled binary
FROM clux/muslrust:nightly-2019-12-19 as build
# set mysql backend
ARG DB=mysql
# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive LANG=C.UTF-8 TZ=UTC TERM=xterm-256color
# Don't download rust docs
RUN rustup set profile minimal
ENV USER "root"
# Install MySQL package
RUN apt-get update && apt-get install -y \
--no-install-recommends \
libmysqlclient-dev \
&& rm -rf /var/lib/apt/lists/*
# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
WORKDIR /app
# Copies over *only* your manifests and build files
COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs
RUN rustup target add x86_64-unknown-linux-musl
# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN cargo build --features ${DB} --release
RUN find . -not -path "./target*" -delete
# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .
# Make sure that we actually build the project
RUN touch src/main.rs
# Builds again, this time it'll just be
# your actual source files being built
RUN cargo build --features ${DB} --release
######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM alpine:3.11
ENV ROCKET_ENV "staging"
ENV ROCKET_PORT=80
ENV ROCKET_WORKERS=10
ENV SSL_CERT_DIR=/etc/ssl/certs
# Install needed libraries
RUN apk add --no-cache \
openssl \
curl \
mariadb-connector-c \
ca-certificates
RUN mkdir /data
VOLUME /data
EXPOSE 80
EXPOSE 3012
# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
COPY Rocket.toml .
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/x86_64-unknown-linux-musl/release/bitwarden_rs .
COPY docker/healthcheck.sh ./healthcheck.sh
HEALTHCHECK --interval=30s --timeout=3s CMD sh healthcheck.sh || exit 1
# Configures the startup!
WORKDIR /
CMD ["/bitwarden_rs"]


@@ -1,95 +0,0 @@
# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfile's.
# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
# This hash is extracted from the docker web-vault builds and it's prefered over a simple tag because it's immutable.
# It can be viewed in multiple ways:
# - From the https://hub.docker.com/repository/docker/bitwardenrs/web-vault/tags page, click the tag name and the digest should be there.
# - From the console, with the following commands:
# docker pull bitwardenrs/web-vault:v2.12.0e
# docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.12.0e
#
# - To do the opposite, and get the tag from the hash, you can do:
# docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:feb3f46d15738191b9043be4cdb1be2c0078ed411e7b7be73a2f4fcbca01e13c
FROM bitwardenrs/web-vault@sha256:feb3f46d15738191b9043be4cdb1be2c0078ed411e7b7be73a2f4fcbca01e13c as vault
########################## BUILD IMAGE ##########################
# We need to use the Rust build image, because
# we need the Rust compiler and Cargo tooling
FROM rust:1.40 as build
# set sqlite as default for DB ARG for backward compatibility
ARG DB=sqlite
# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive LANG=C.UTF-8 TZ=UTC TERM=xterm-256color
# Don't download rust docs
RUN rustup set profile minimal
# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
WORKDIR /app
# Copies over *only* your manifests and build files
COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs
# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN cargo build --features ${DB} --release
RUN find . -not -path "./target*" -delete
# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .
# Make sure that we actually build the project
RUN touch src/main.rs
# Builds again, this time it'll just be
# your actual source files being built
RUN cargo build --features ${DB} --release
######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM debian:buster-slim
ENV ROCKET_ENV "staging"
ENV ROCKET_PORT=80
ENV ROCKET_WORKERS=10
# Install needed libraries
RUN apt-get update && apt-get install -y \
--no-install-recommends \
openssl \
ca-certificates \
curl \
sqlite3 \
&& rm -rf /var/lib/apt/lists/*
RUN mkdir /data
VOLUME /data
EXPOSE 80
EXPOSE 3012
# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
COPY Rocket.toml .
COPY --from=vault /web-vault ./web-vault
COPY --from=build app/target/release/bitwarden_rs .
COPY docker/healthcheck.sh ./healthcheck.sh
HEALTHCHECK --interval=30s --timeout=3s CMD sh healthcheck.sh || exit 1
# Configures the startup!
WORKDIR /
CMD ["/bitwarden_rs"]


@@ -1,97 +0,0 @@
# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfile's.
# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
# This hash is extracted from the docker web-vault builds and it's prefered over a simple tag because it's immutable.
# It can be viewed in multiple ways:
# - From the https://hub.docker.com/repository/docker/bitwardenrs/web-vault/tags page, click the tag name and the digest should be there.
# - From the console, with the following commands:
# docker pull bitwardenrs/web-vault:v2.12.0e
# docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.12.0e
#
# - To do the opposite, and get the tag from the hash, you can do:
# docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:feb3f46d15738191b9043be4cdb1be2c0078ed411e7b7be73a2f4fcbca01e13c
FROM bitwardenrs/web-vault@sha256:feb3f46d15738191b9043be4cdb1be2c0078ed411e7b7be73a2f4fcbca01e13c as vault
########################## BUILD IMAGE ##########################
# Musl build image for statically compiled binary
FROM clux/muslrust:nightly-2019-12-19 as build
# set sqlite as default for DB ARG for backward compatibility
ARG DB=sqlite
# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive LANG=C.UTF-8 TZ=UTC TERM=xterm-256color
# Don't download rust docs
RUN rustup set profile minimal
ENV USER "root"
# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
WORKDIR /app
# Copies over *only* your manifests and build files
COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs
RUN rustup target add x86_64-unknown-linux-musl
# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN cargo build --features ${DB} --release
RUN find . -not -path "./target*" -delete
# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .
# Make sure that we actually build the project
RUN touch src/main.rs
# Builds again, this time it'll just be
# your actual source files being built
RUN cargo build --features ${DB} --release
######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM alpine:3.11
ENV ROCKET_ENV "staging"
ENV ROCKET_PORT=80
ENV ROCKET_WORKERS=10
ENV SSL_CERT_DIR=/etc/ssl/certs
# Install needed libraries
RUN apk add --no-cache \
openssl \
curl \
sqlite \
ca-certificates
RUN mkdir /data
VOLUME /data
EXPOSE 80
EXPOSE 3012
# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
COPY Rocket.toml .
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/x86_64-unknown-linux-musl/release/bitwarden_rs .
COPY docker/healthcheck.sh ./healthcheck.sh
HEALTHCHECK --interval=30s --timeout=3s CMD sh healthcheck.sh || exit 1
# Configures the startup!
WORKDIR /
CMD ["/bitwarden_rs"]


@@ -1,29 +1,34 @@
# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfile's. # Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.
# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
# The web-vault digest specifies a particular web-vault build on Docker Hub.
# This hash is extracted from the docker web-vault builds and it's prefered over a simple tag because it's immutable. # Using the digest instead of the tag name provides better security,
# It can be viewed in multiple ways: # as the digest of an image is immutable, whereas a tag name can later
# - From the https://hub.docker.com/repository/docker/bitwardenrs/web-vault/tags page, click the tag name and the digest should be there. # be changed to point to a malicious image.
# - From the console, with the following commands: #
# docker pull bitwardenrs/web-vault:v2.12.0e # To verify the current digest for a given tag name:
# docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.12.0e # - From https://hub.docker.com/r/bitwardenrs/web-vault/tags,
# # click the tag name to view the digest of the image it currently points to.
# - To do the opposite, and get the tag from the hash, you can do: # - From the command line:
# docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:feb3f46d15738191b9043be4cdb1be2c0078ed411e7b7be73a2f4fcbca01e13c # $ docker pull bitwardenrs/web-vault:v2.19.0
FROM bitwardenrs/web-vault@sha256:feb3f46d15738191b9043be4cdb1be2c0078ed411e7b7be73a2f4fcbca01e13c as vault # $ docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.19.0
# [bitwardenrs/web-vault@sha256:8747cfaa2c6d87d1749e119dd884697e8099389aa9aca30a4d73d4ff796fe0e4]
#
# - Conversely, to get the tag name from the digest:
# $ docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:8747cfaa2c6d87d1749e119dd884697e8099389aa9aca30a4d73d4ff796fe0e4
# [bitwardenrs/web-vault:v2.19.0]
#
FROM bitwardenrs/web-vault@sha256:8747cfaa2c6d87d1749e119dd884697e8099389aa9aca30a4d73d4ff796fe0e4 as vault
########################## BUILD IMAGE ##########################
# We need to use the Rust build image, because FROM rust:1.50 as build
# we need the Rust compiler and Cargo tooling
FROM rust:1.40 as build
# set sqlite as default for DB ARG for backward compatibility # Debian-based builds support multidb
ARG DB=sqlite ARG DB=sqlite,mysql,postgresql
# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive LANG=C.UTF-8 TZ=UTC TERM=xterm-256color
@@ -32,6 +37,7 @@ ENV DEBIAN_FRONTEND=noninteractive LANG=C.UTF-8 TZ=UTC TERM=xterm-256color
RUN rustup set profile minimal
# Install required build libs for arm64 architecture.
# To compile both mysql and postgresql we need some extra packages for both host arch and target arch
RUN sed 's/^deb/deb-src/' /etc/apt/sources.list > \
/etc/apt/sources.list.d/deb-src.list \
&& dpkg --add-architecture arm64 \
@@ -39,7 +45,11 @@ RUN sed 's/^deb/deb-src/' /etc/apt/sources.list > \
&& apt-get install -y \
--no-install-recommends \
libssl-dev:arm64 \
libc6-dev:arm64 libc6-dev:arm64 \
libpq5:arm64 \
libpq-dev \
libmariadb-dev:arm64 \
libmariadb-dev-compat:arm64
RUN apt-get update \
&& apt-get install -y \
@@ -47,7 +57,8 @@ RUN apt-get update \
gcc-aarch64-linux-gnu \
&& mkdir -p ~/.cargo \
&& echo '[target.aarch64-unknown-linux-gnu]' >> ~/.cargo/config \
&& echo 'linker = "aarch64-linux-gnu-gcc"' >> ~/.cargo/config && echo 'linker = "aarch64-linux-gnu-gcc"' >> ~/.cargo/config \
&& echo 'rustflags = ["-L/usr/lib/aarch64-linux-gnu"]' >> ~/.cargo/config
ENV CARGO_HOME "/root/.cargo"
ENV USER "root"
@@ -61,6 +72,22 @@ COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs
# NOTE: This should be the last apt-get/dpkg for this stage, since after this it will fail because of broken dependencies.
# For Diesel-RS migrations_macros to compile with MySQL/MariaDB we need to do some magic.
# We at least need libmariadb3:amd64 installed for the x86_64 version of libmariadb.so (client)
# We also need the libmariadb-dev-compat:amd64 but it can not be installed together with the :arm64 version.
# What we can do is a force install, because nothing important is overlapping each other.
RUN apt-get install -y --no-install-recommends libmariadb3:amd64 && \
apt-get download libmariadb-dev-compat:amd64 && \
dpkg --force-all -i ./libmariadb-dev-compat*.deb && \
rm -rvf ./libmariadb-dev-compat*.deb
# For Diesel-RS migrations_macros to compile with PostgreSQL we need to do some magic.
# The libpq5:arm64 package seems to not provide a symlink to libpq.so.5 with the name libpq.so.
# This is only provided by the libpq-dev package which can't be installed for both arch at the same time.
# Without this specific file the ld command will fail and compilation fails with it.
RUN ln -sfnr /usr/lib/aarch64-linux-gnu/libpq.so.5 /usr/lib/aarch64-linux-gnu/libpq.so
ENV CC_aarch64_unknown_linux_gnu="/usr/bin/aarch64-linux-gnu-gcc"
ENV CROSS_COMPILE="1"
ENV OPENSSL_INCLUDE_DIR="/usr/include/aarch64-linux-gnu"
@@ -70,7 +97,7 @@ RUN rustup target add aarch64-unknown-linux-gnu
# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN cargo build --features ${DB} --release RUN cargo build --features ${DB} --release --target=aarch64-unknown-linux-gnu
RUN find . -not -path "./target*" -delete
# Copies the complete project
@@ -101,7 +128,10 @@ RUN apt-get update && apt-get install -y \
openssl \
ca-certificates \
curl \
dumb-init \
sqlite3 \
libmariadb-dev-compat \
libpq5 \
&& rm -rf /var/lib/apt/lists/*
RUN mkdir /data
@@ -118,10 +148,12 @@ COPY Rocket.toml .
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/aarch64-unknown-linux-gnu/release/bitwarden_rs .
COPY docker/healthcheck.sh ./healthcheck.sh COPY docker/healthcheck.sh /healthcheck.sh
COPY docker/start.sh /start.sh
HEALTHCHECK --interval=30s --timeout=3s CMD sh healthcheck.sh || exit 1 HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]
# Configures the startup!
WORKDIR /
CMD ["/bitwarden_rs"] ENTRYPOINT ["/usr/bin/dumb-init", "--"]
CMD ["/start.sh"]
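Note on the cross-compile linker setup: the chained echo commands in this Dockerfile amount to writing a small Cargo configuration file for the cross target, and the new rustflags entry points the linker at the arm64 copies of the target-arch libraries (libpq, libmariadb) installed above. For the arm64 build, the resulting ~/.cargo/config would contain roughly:

[target.aarch64-unknown-linux-gnu]
linker = "aarch64-linux-gnu-gcc"
rustflags = ["-L/usr/lib/aarch64-linux-gnu"]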


@@ -1,29 +1,34 @@
# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfile's. # Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.
# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
# The web-vault digest specifies a particular web-vault build on Docker Hub.
# This hash is extracted from the docker web-vault builds and it's prefered over a simple tag because it's immutable. # Using the digest instead of the tag name provides better security,
# It can be viewed in multiple ways: # as the digest of an image is immutable, whereas a tag name can later
# - From the https://hub.docker.com/repository/docker/bitwardenrs/web-vault/tags page, click the tag name and the digest should be there. # be changed to point to a malicious image.
# - From the console, with the following commands: #
# docker pull bitwardenrs/web-vault:v2.12.0e # To verify the current digest for a given tag name:
# docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.12.0e # - From https://hub.docker.com/r/bitwardenrs/web-vault/tags,
# # click the tag name to view the digest of the image it currently points to.
# - To do the opposite, and get the tag from the hash, you can do: # - From the command line:
# docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:feb3f46d15738191b9043be4cdb1be2c0078ed411e7b7be73a2f4fcbca01e13c # $ docker pull bitwardenrs/web-vault:v2.19.0
FROM bitwardenrs/web-vault@sha256:feb3f46d15738191b9043be4cdb1be2c0078ed411e7b7be73a2f4fcbca01e13c as vault # $ docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.19.0
# [bitwardenrs/web-vault@sha256:8747cfaa2c6d87d1749e119dd884697e8099389aa9aca30a4d73d4ff796fe0e4]
#
# - Conversely, to get the tag name from the digest:
# $ docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:8747cfaa2c6d87d1749e119dd884697e8099389aa9aca30a4d73d4ff796fe0e4
# [bitwardenrs/web-vault:v2.19.0]
#
FROM bitwardenrs/web-vault@sha256:8747cfaa2c6d87d1749e119dd884697e8099389aa9aca30a4d73d4ff796fe0e4 as vault
########################## BUILD IMAGE ##########################
# We need to use the Rust build image, because FROM rust:1.50 as build
# we need the Rust compiler and Cargo tooling
FROM rust:1.40 as build
# set sqlite as default for DB ARG for backward compatibility # Debian-based builds support multidb
ARG DB=sqlite ARG DB=sqlite,mysql,postgresql
# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive LANG=C.UTF-8 TZ=UTC TERM=xterm-256color
@@ -32,6 +37,7 @@ ENV DEBIAN_FRONTEND=noninteractive LANG=C.UTF-8 TZ=UTC TERM=xterm-256color
RUN rustup set profile minimal
# Install required build libs for armel architecture.
# To compile both mysql and postgresql we need some extra packages for both host arch and target arch
RUN sed 's/^deb/deb-src/' /etc/apt/sources.list > \
/etc/apt/sources.list.d/deb-src.list \
&& dpkg --add-architecture armel \
@@ -39,7 +45,11 @@ RUN sed 's/^deb/deb-src/' /etc/apt/sources.list > \
&& apt-get install -y \
--no-install-recommends \
libssl-dev:armel \
libc6-dev:armel libc6-dev:armel \
libpq5:armel \
libpq-dev \
libmariadb-dev:armel \
libmariadb-dev-compat:armel
RUN apt-get update \
&& apt-get install -y \
@@ -47,7 +57,8 @@ RUN apt-get update \
gcc-arm-linux-gnueabi \
&& mkdir -p ~/.cargo \
&& echo '[target.arm-unknown-linux-gnueabi]' >> ~/.cargo/config \
&& echo 'linker = "arm-linux-gnueabi-gcc"' >> ~/.cargo/config && echo 'linker = "arm-linux-gnueabi-gcc"' >> ~/.cargo/config \
&& echo 'rustflags = ["-L/usr/lib/arm-linux-gnueabi"]' >> ~/.cargo/config
ENV CARGO_HOME "/root/.cargo"
ENV USER "root"
@@ -61,6 +72,22 @@ COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs
# NOTE: This should be the last apt-get/dpkg for this stage, since after this it will fail because of broken dependencies.
# For Diesel-RS migrations_macros to compile with MySQL/MariaDB we need to do some magic.
# We at least need libmariadb3:amd64 installed for the x86_64 version of libmariadb.so (client)
# We also need the libmariadb-dev-compat:amd64 but it can not be installed together with the :armel version.
# What we can do is a force install, because nothing important is overlapping each other.
RUN apt-get install -y --no-install-recommends libmariadb3:amd64 && \
apt-get download libmariadb-dev-compat:amd64 && \
dpkg --force-all -i ./libmariadb-dev-compat*.deb && \
rm -rvf ./libmariadb-dev-compat*.deb
# For Diesel-RS migrations_macros to compile with PostgreSQL we need to do some magic.
# The libpq5:armel package seems to not provide a symlink to libpq.so.5 with the name libpq.so.
# This is only provided by the libpq-dev package which can't be installed for both arch at the same time.
# Without this specific file the ld command will fail and compilation fails with it.
RUN ln -sfnr /usr/lib/arm-linux-gnueabi/libpq.so.5 /usr/lib/arm-linux-gnueabi/libpq.so
ENV CC_arm_unknown_linux_gnueabi="/usr/bin/arm-linux-gnueabi-gcc"
ENV CROSS_COMPILE="1"
ENV OPENSSL_INCLUDE_DIR="/usr/include/arm-linux-gnueabi"
@@ -70,7 +97,7 @@ RUN rustup target add arm-unknown-linux-gnueabi
# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN cargo build --features ${DB} --release RUN cargo build --features ${DB} --release --target=arm-unknown-linux-gnueabi
RUN find . -not -path "./target*" -delete
# Copies the complete project
@@ -101,7 +128,10 @@ RUN apt-get update && apt-get install -y \
openssl \
ca-certificates \
curl \
dumb-init \
sqlite3 \
libmariadb-dev-compat \
libpq5 \
&& rm -rf /var/lib/apt/lists/*
RUN mkdir /data
@@ -118,10 +148,12 @@ COPY Rocket.toml .
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/arm-unknown-linux-gnueabi/release/bitwarden_rs .
COPY docker/healthcheck.sh ./healthcheck.sh COPY docker/healthcheck.sh /healthcheck.sh
COPY docker/start.sh /start.sh
HEALTHCHECK --interval=30s --timeout=3s CMD sh healthcheck.sh || exit 1 HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]
# Configures the startup!
WORKDIR /
CMD ["/bitwarden_rs"] ENTRYPOINT ["/usr/bin/dumb-init", "--"]
CMD ["/start.sh"]


@@ -1,133 +0,0 @@
# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfile's.
# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
# This hash is extracted from the docker web-vault builds and it's prefered over a simple tag because it's immutable.
# It can be viewed in multiple ways:
# - From the https://hub.docker.com/repository/docker/bitwardenrs/web-vault/tags page, click the tag name and the digest should be there.
# - From the console, with the following commands:
# docker pull bitwardenrs/web-vault:v2.12.0e
# docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.12.0e
#
# - To do the opposite, and get the tag from the hash, you can do:
# docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:feb3f46d15738191b9043be4cdb1be2c0078ed411e7b7be73a2f4fcbca01e13c
FROM bitwardenrs/web-vault@sha256:feb3f46d15738191b9043be4cdb1be2c0078ed411e7b7be73a2f4fcbca01e13c as vault
########################## BUILD IMAGE ##########################
# We need to use the Rust build image, because
# we need the Rust compiler and Cargo tooling
FROM rust:1.40 as build
# set mysql backend
ARG DB=mysql
# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive LANG=C.UTF-8 TZ=UTC TERM=xterm-256color
# Don't download rust docs
RUN rustup set profile minimal
# Install required build libs for armel architecture.
RUN sed 's/^deb/deb-src/' /etc/apt/sources.list > \
/etc/apt/sources.list.d/deb-src.list \
&& dpkg --add-architecture armel \
&& apt-get update \
&& apt-get install -y \
--no-install-recommends \
libssl-dev:armel \
libc6-dev:armel
RUN apt-get update \
&& apt-get install -y \
--no-install-recommends \
gcc-arm-linux-gnueabi \
&& mkdir -p ~/.cargo \
&& echo '[target.arm-unknown-linux-gnueabi]' >> ~/.cargo/config \
&& echo 'linker = "arm-linux-gnueabi-gcc"' >> ~/.cargo/config
ENV CARGO_HOME "/root/.cargo"
ENV USER "root"
# Install MySQL package
RUN apt-get update && apt-get install -y \
--no-install-recommends \
libmariadb-dev:armel \
&& rm -rf /var/lib/apt/lists/*
# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
WORKDIR /app
# Copies over *only* your manifests and build files
COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs
ENV CC_arm_unknown_linux_gnueabi="/usr/bin/arm-linux-gnueabi-gcc"
ENV CROSS_COMPILE="1"
ENV OPENSSL_INCLUDE_DIR="/usr/include/arm-linux-gnueabi"
ENV OPENSSL_LIB_DIR="/usr/lib/arm-linux-gnueabi"
RUN rustup target add arm-unknown-linux-gnueabi
# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN cargo build --features ${DB} --release
RUN find . -not -path "./target*" -delete
# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .
# Make sure that we actually build the project
RUN touch src/main.rs
# Builds again, this time it'll just be
# your actual source files being built
RUN cargo build --features ${DB} --release --target=arm-unknown-linux-gnueabi
######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM balenalib/rpi-debian:buster
ENV ROCKET_ENV "staging"
ENV ROCKET_PORT=80
ENV ROCKET_WORKERS=10
RUN [ "cross-build-start" ]
# Install needed libraries
RUN apt-get update && apt-get install -y \
--no-install-recommends \
openssl \
ca-certificates \
curl \
libmariadbclient-dev \
&& rm -rf /var/lib/apt/lists/*
RUN mkdir /data
RUN [ "cross-build-end" ]
VOLUME /data
EXPOSE 80
EXPOSE 3012
# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
COPY Rocket.toml .
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/arm-unknown-linux-gnueabi/release/bitwarden_rs .
COPY docker/healthcheck.sh ./healthcheck.sh
HEALTHCHECK --interval=30s --timeout=3s CMD sh healthcheck.sh || exit 1
# Configures the startup!
WORKDIR /
CMD ["/bitwarden_rs"]


@@ -1,29 +1,34 @@
# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfile's. # Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.
# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
# The web-vault digest specifies a particular web-vault build on Docker Hub.
# This hash is extracted from the docker web-vault builds and it's prefered over a simple tag because it's immutable. # Using the digest instead of the tag name provides better security,
# It can be viewed in multiple ways: # as the digest of an image is immutable, whereas a tag name can later
# - From the https://hub.docker.com/repository/docker/bitwardenrs/web-vault/tags page, click the tag name and the digest should be there. # be changed to point to a malicious image.
# - From the console, with the following commands: #
# docker pull bitwardenrs/web-vault:v2.12.0e # To verify the current digest for a given tag name:
# docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.12.0e # - From https://hub.docker.com/r/bitwardenrs/web-vault/tags,
# # click the tag name to view the digest of the image it currently points to.
# - To do the opposite, and get the tag from the hash, you can do: # - From the command line:
# docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:feb3f46d15738191b9043be4cdb1be2c0078ed411e7b7be73a2f4fcbca01e13c # $ docker pull bitwardenrs/web-vault:v2.19.0
FROM bitwardenrs/web-vault@sha256:feb3f46d15738191b9043be4cdb1be2c0078ed411e7b7be73a2f4fcbca01e13c as vault # $ docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.19.0
# [bitwardenrs/web-vault@sha256:8747cfaa2c6d87d1749e119dd884697e8099389aa9aca30a4d73d4ff796fe0e4]
#
# - Conversely, to get the tag name from the digest:
# $ docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:8747cfaa2c6d87d1749e119dd884697e8099389aa9aca30a4d73d4ff796fe0e4
# [bitwardenrs/web-vault:v2.19.0]
#
FROM bitwardenrs/web-vault@sha256:8747cfaa2c6d87d1749e119dd884697e8099389aa9aca30a4d73d4ff796fe0e4 as vault
########################## BUILD IMAGE ##########################
# We need to use the Rust build image, because FROM rust:1.50 as build
# we need the Rust compiler and Cargo tooling
FROM rust:1.40 as build
# set sqlite as default for DB ARG for backward compatibility # Debian-based builds support multidb
ARG DB=sqlite ARG DB=sqlite,mysql,postgresql
# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive LANG=C.UTF-8 TZ=UTC TERM=xterm-256color
@@ -32,6 +37,7 @@ ENV DEBIAN_FRONTEND=noninteractive LANG=C.UTF-8 TZ=UTC TERM=xterm-256color
RUN rustup set profile minimal
# Install required build libs for armhf architecture.
# To compile both mysql and postgresql we need some extra packages for both host arch and target arch
RUN sed 's/^deb/deb-src/' /etc/apt/sources.list > \
/etc/apt/sources.list.d/deb-src.list \
&& dpkg --add-architecture armhf \
@@ -39,7 +45,11 @@ RUN sed 's/^deb/deb-src/' /etc/apt/sources.list > \
&& apt-get install -y \ && apt-get install -y \
--no-install-recommends \ --no-install-recommends \
libssl-dev:armhf \ libssl-dev:armhf \
libc6-dev:armhf libc6-dev:armhf \
libpq5:armhf \
libpq-dev \
libmariadb-dev:armhf \
libmariadb-dev-compat:armhf
RUN apt-get update \ RUN apt-get update \
&& apt-get install -y \ && apt-get install -y \
@@ -47,7 +57,8 @@ RUN apt-get update \
gcc-arm-linux-gnueabihf \ gcc-arm-linux-gnueabihf \
&& mkdir -p ~/.cargo \ && mkdir -p ~/.cargo \
&& echo '[target.armv7-unknown-linux-gnueabihf]' >> ~/.cargo/config \ && echo '[target.armv7-unknown-linux-gnueabihf]' >> ~/.cargo/config \
&& echo 'linker = "arm-linux-gnueabihf-gcc"' >> ~/.cargo/config && echo 'linker = "arm-linux-gnueabihf-gcc"' >> ~/.cargo/config \
&& echo 'rustflags = ["-L/usr/lib/arm-linux-gnueabihf"]' >> ~/.cargo/config
ENV CARGO_HOME "/root/.cargo" ENV CARGO_HOME "/root/.cargo"
ENV USER "root" ENV USER "root"
@@ -61,15 +72,32 @@ COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs COPY ./build.rs ./build.rs
# NOTE: This should be the last apt-get/dpkg call in this stage, since any later ones will fail because of the broken dependencies it creates.
# For the Diesel-RS migrations_macros to compile with MySQL/MariaDB we need to do some magic.
# We at least need libmariadb3:amd64 installed for the x86_64 version of libmariadb.so (client).
# We also need libmariadb-dev-compat:amd64, but it cannot be installed together with the :armhf version.
# What we can do is a force install, because nothing important overlaps between the two.
RUN apt-get install -y --no-install-recommends libmariadb3:amd64 && \
apt-get download libmariadb-dev-compat:amd64 && \
dpkg --force-all -i ./libmariadb-dev-compat*.deb && \
rm -rvf ./libmariadb-dev-compat*.deb
# For the Diesel-RS migrations_macros to compile with PostgreSQL we need to do some magic.
# The libpq5:armhf package does not seem to provide a symlink to libpq.so.5 named libpq.so.
# That symlink is only provided by the libpq-dev package, which can't be installed for both architectures at the same time.
# Without this specific file the ld command fails, and compilation fails with it.
RUN ln -sfnr /usr/lib/arm-linux-gnueabihf/libpq.so.5 /usr/lib/arm-linux-gnueabihf/libpq.so
ENV CC_armv7_unknown_linux_gnueabihf="/usr/bin/arm-linux-gnueabihf-gcc" ENV CC_armv7_unknown_linux_gnueabihf="/usr/bin/arm-linux-gnueabihf-gcc"
ENV CROSS_COMPILE="1" ENV CROSS_COMPILE="1"
ENV OPENSSL_INCLUDE_DIR="/usr/include/arm-linux-gnueabihf" ENV OPENSSL_INCLUDE_DIR="/usr/include/arm-linux-gnueabihf"
ENV OPENSSL_LIB_DIR="/usr/lib/arm-linux-gnueabihf" ENV OPENSSL_LIB_DIR="/usr/lib/arm-linux-gnueabihf"
RUN rustup target add armv7-unknown-linux-gnueabihf RUN rustup target add armv7-unknown-linux-gnueabihf
# Builds your dependencies and removes the # Builds your dependencies and removes the
# dummy project, except the target folder # dummy project, except the target folder
# This folder contains the compiled dependencies # This folder contains the compiled dependencies
RUN cargo build --features ${DB} --release RUN cargo build --features ${DB} --release --target=armv7-unknown-linux-gnueabihf
RUN find . -not -path "./target*" -delete RUN find . -not -path "./target*" -delete
# Copies the complete project # Copies the complete project
@@ -100,7 +128,10 @@ RUN apt-get update && apt-get install -y \
openssl \ openssl \
ca-certificates \ ca-certificates \
curl \ curl \
dumb-init \
sqlite3 \ sqlite3 \
libmariadb-dev-compat \
libpq5 \
&& rm -rf /var/lib/apt/lists/* && rm -rf /var/lib/apt/lists/*
RUN mkdir /data RUN mkdir /data
@@ -117,10 +148,12 @@ COPY Rocket.toml .
COPY --from=vault /web-vault ./web-vault COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/armv7-unknown-linux-gnueabihf/release/bitwarden_rs . COPY --from=build /app/target/armv7-unknown-linux-gnueabihf/release/bitwarden_rs .
COPY docker/healthcheck.sh ./healthcheck.sh COPY docker/healthcheck.sh /healthcheck.sh
COPY docker/start.sh /start.sh
HEALTHCHECK --interval=30s --timeout=3s CMD sh healthcheck.sh || exit 1 HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]
# Configures the startup! # Configures the startup!
WORKDIR / WORKDIR /
CMD ["/bitwarden_rs"] ENTRYPOINT ["/usr/bin/dumb-init", "--"]
CMD ["/start.sh"]

View File

@@ -0,0 +1,114 @@
# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.
# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
# The web-vault digest specifies a particular web-vault build on Docker Hub.
# Using the digest instead of the tag name provides better security,
# as the digest of an image is immutable, whereas a tag name can later
# be changed to point to a malicious image.
#
# To verify the current digest for a given tag name:
# - From https://hub.docker.com/r/bitwardenrs/web-vault/tags,
# click the tag name to view the digest of the image it currently points to.
# - From the command line:
# $ docker pull bitwardenrs/web-vault:v2.19.0
# $ docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.19.0
# [bitwardenrs/web-vault@sha256:8747cfaa2c6d87d1749e119dd884697e8099389aa9aca30a4d73d4ff796fe0e4]
#
# - Conversely, to get the tag name from the digest:
# $ docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:8747cfaa2c6d87d1749e119dd884697e8099389aa9aca30a4d73d4ff796fe0e4
# [bitwardenrs/web-vault:v2.19.0]
#
FROM bitwardenrs/web-vault@sha256:8747cfaa2c6d87d1749e119dd884697e8099389aa9aca30a4d73d4ff796fe0e4 as vault
########################## BUILD IMAGE ##########################
FROM messense/rust-musl-cross:armv7-musleabihf as build
# Alpine-based ARM (musl) only supports sqlite at compile time.
ARG DB=sqlite
# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive LANG=C.UTF-8 TZ=UTC TERM=xterm-256color
# Don't download rust docs
RUN rustup set profile minimal
ENV USER "root"
ENV RUSTFLAGS='-C link-arg=-s'
ENV CFLAGS_armv7_unknown_linux_musleabihf="-mfpu=vfpv3-d16"
# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
WORKDIR /app
# Copies over *only* your manifests and build files
COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs
RUN rustup target add armv7-unknown-linux-musleabihf
# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN cargo build --features ${DB} --release --target=armv7-unknown-linux-musleabihf
RUN find . -not -path "./target*" -delete
# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .
# Make sure that we actually build the project
RUN touch src/main.rs
# Builds again, this time it'll just be
# your actual source files being built
RUN cargo build --features ${DB} --release --target=armv7-unknown-linux-musleabihf
RUN musl-strip target/armv7-unknown-linux-musleabihf/release/bitwarden_rs
######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM balenalib/armv7hf-alpine:3.13
ENV ROCKET_ENV "staging"
ENV ROCKET_PORT=80
ENV ROCKET_WORKERS=10
ENV SSL_CERT_DIR=/etc/ssl/certs
RUN [ "cross-build-start" ]
# Install needed libraries
RUN apk add --no-cache \
openssl \
curl \
dumb-init \
sqlite \
ca-certificates
RUN mkdir /data
RUN [ "cross-build-end" ]
VOLUME /data
EXPOSE 80
EXPOSE 3012
# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
COPY Rocket.toml .
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/armv7-unknown-linux-musleabihf/release/bitwarden_rs .
COPY docker/healthcheck.sh /healthcheck.sh
COPY docker/start.sh /start.sh
HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]
# Configures the startup!
WORKDIR /
ENTRYPOINT ["/usr/bin/dumb-init", "--"]
CMD ["/start.sh"]

View File

@@ -1,132 +0,0 @@
# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.
# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
# This hash is extracted from the docker web-vault builds and it's preferred over a simple tag because it's immutable.
# It can be viewed in multiple ways:
# - From the https://hub.docker.com/repository/docker/bitwardenrs/web-vault/tags page, click the tag name and the digest should be there.
# - From the console, with the following commands:
# docker pull bitwardenrs/web-vault:v2.12.0e
# docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.12.0e
#
# - To do the opposite, and get the tag from the hash, you can do:
# docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:feb3f46d15738191b9043be4cdb1be2c0078ed411e7b7be73a2f4fcbca01e13c
FROM bitwardenrs/web-vault@sha256:feb3f46d15738191b9043be4cdb1be2c0078ed411e7b7be73a2f4fcbca01e13c as vault
########################## BUILD IMAGE ##########################
# We need to use the Rust build image, because
# we need the Rust compiler and Cargo tooling
FROM rust:1.40 as build
# set mysql backend
ARG DB=mysql
# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive LANG=C.UTF-8 TZ=UTC TERM=xterm-256color
# Don't download rust docs
RUN rustup set profile minimal
# Install required build libs for armhf architecture.
RUN sed 's/^deb/deb-src/' /etc/apt/sources.list > \
/etc/apt/sources.list.d/deb-src.list \
&& dpkg --add-architecture armhf \
&& apt-get update \
&& apt-get install -y \
--no-install-recommends \
libssl-dev:armhf \
libc6-dev:armhf
RUN apt-get update \
&& apt-get install -y \
--no-install-recommends \
gcc-arm-linux-gnueabihf \
&& mkdir -p ~/.cargo \
&& echo '[target.armv7-unknown-linux-gnueabihf]' >> ~/.cargo/config \
&& echo 'linker = "arm-linux-gnueabihf-gcc"' >> ~/.cargo/config
ENV CARGO_HOME "/root/.cargo"
ENV USER "root"
# Install MySQL package
RUN apt-get update && apt-get install -y \
--no-install-recommends \
libmariadb-dev:armhf \
&& rm -rf /var/lib/apt/lists/*
# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
WORKDIR /app
# Copies over *only* your manifests and build files
COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs
ENV CC_armv7_unknown_linux_gnueabihf="/usr/bin/arm-linux-gnueabihf-gcc"
ENV CROSS_COMPILE="1"
ENV OPENSSL_INCLUDE_DIR="/usr/include/arm-linux-gnueabihf"
ENV OPENSSL_LIB_DIR="/usr/lib/arm-linux-gnueabihf"
RUN rustup target add armv7-unknown-linux-gnueabihf
# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN cargo build --features ${DB} --release
RUN find . -not -path "./target*" -delete
# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .
# Make sure that we actually build the project
RUN touch src/main.rs
# Builds again, this time it'll just be
# your actual source files being built
RUN cargo build --features ${DB} --release --target=armv7-unknown-linux-gnueabihf
######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM balenalib/armv7hf-debian:buster
ENV ROCKET_ENV "staging"
ENV ROCKET_PORT=80
ENV ROCKET_WORKERS=10
RUN [ "cross-build-start" ]
# Install needed libraries
RUN apt-get update && apt-get install -y \
--no-install-recommends \
openssl \
ca-certificates \
curl \
libmariadbclient-dev \
&& rm -rf /var/lib/apt/lists/*
RUN mkdir /data
RUN [ "cross-build-end" ]
VOLUME /data
EXPOSE 80
EXPOSE 3012
# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
COPY Rocket.toml .
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/armv7-unknown-linux-gnueabihf/release/bitwarden_rs .
COPY docker/healthcheck.sh ./healthcheck.sh
HEALTHCHECK --interval=30s --timeout=3s CMD sh healthcheck.sh || exit 1
# Configures the startup!
WORKDIR /
CMD ["/bitwarden_rs"]

59
docker/healthcheck.sh Normal file → Executable file
View File

@@ -1,8 +1,53 @@
#!/usr/bin/env sh #!/bin/sh
if [ -z "$ROCKET_TLS"] # Use the value of the corresponding env var (if present),
then # or a default value otherwise.
curl --fail http://localhost:${ROCKET_PORT:-"80"}/alive || exit 1 : ${DATA_FOLDER:="data"}
else : ${ROCKET_PORT:="80"}
curl --insecure --fail https://localhost:${ROCKET_PORT:-"80"}/alive || exit 1
fi CONFIG_FILE="${DATA_FOLDER}"/config.json
# Given a config key, return the corresponding config value from the
# config file. If the key doesn't exist, return an empty string.
get_config_val() {
local key="$1"
# Extract a line of the form:
# "domain": "https://bw.example.com/path",
grep "\"${key}\":" "${CONFIG_FILE}" |
# To extract just the value (https://bw.example.com/path), delete:
# (1) everything up to and including the first ':',
# (2) whitespace and '"' from the front,
# (3) ',' and '"' from the back.
sed -e 's/[^:]\+://' -e 's/^[ "]\+//' -e 's/[,"]\+$//'
}
# Extract the base path from a domain URL. For example:
# - `` -> ``
# - `https://bw.example.com` -> ``
# - `https://bw.example.com/` -> ``
# - `https://bw.example.com/path` -> `/path`
# - `https://bw.example.com/multi/path` -> `/multi/path`
get_base_path() {
echo "$1" |
# Delete:
# (1) everything up to and including '://',
# (2) everything up to '/',
# (3) trailing '/' from the back.
sed -e 's|.*://||' -e 's|[^/]\+||' -e 's|/*$||'
}
# Read domain URL from config.json, if present.
if [ -r "${CONFIG_FILE}" ]; then
domain="$(get_config_val 'domain')"
if [ -n "${domain}" ]; then
# config.json 'domain' overrides the DOMAIN env var.
DOMAIN="${domain}"
fi
fi
base_path="$(get_base_path "${DOMAIN}")"
if [ -n "${ROCKET_TLS}" ]; then
s='s'
fi
curl --insecure --fail --silent --show-error \
"http${s}://localhost:${ROCKET_PORT}${base_path}/alive" || exit 1

15
docker/start.sh Executable file
View File

@@ -0,0 +1,15 @@
#!/bin/sh
if [ -r /etc/bitwarden_rs.sh ]; then
. /etc/bitwarden_rs.sh
fi
if [ -d /etc/bitwarden_rs.d ]; then
for f in /etc/bitwarden_rs.d/*.sh; do
if [ -r $f ]; then
. $f
fi
done
fi
exec /bitwarden_rs "${@}"
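As a usage sketch (not part of this commit; the file name and variables are hypothetical examples), an operator could place a drop-in like the following in the directory sourced above to set environment variables before the server starts:
# /etc/bitwarden_rs.d/10-smtp.sh (hypothetical drop-in)
# Any readable *.sh file in this directory is sourced by /start.sh before exec'ing the server.
export SMTP_HOST=mail.example.com
export SMTP_FROM=bitwarden@example.com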

20
hooks/README.md Normal file
View File

@@ -0,0 +1,20 @@
The hooks in this directory are used to create multi-arch images using Docker Hub automated builds.
Docker Hub hooks provide these predefined [environment variables](https://docs.docker.com/docker-hub/builds/advanced/#environment-variables-for-building-and-testing):
* `SOURCE_BRANCH`: the name of the branch or the tag that is currently being tested.
* `SOURCE_COMMIT`: the SHA1 hash of the commit being tested.
* `COMMIT_MSG`: the message from the commit being tested and built.
* `DOCKER_REPO`: the name of the Docker repository being built.
* `DOCKERFILE_PATH`: the dockerfile currently being built.
* `DOCKER_TAG`: the Docker repository tag being built.
* `IMAGE_NAME`: the name and tag of the Docker repository being built. (This variable is a combination of `DOCKER_REPO:DOCKER_TAG`.)
The current multi-arch image build relies on the original bitwarden_rs Dockerfiles, which use cross-compilation for architectures other than `amd64`, and don't yet support all arch/distro combinations. However, cross-compilation is much faster than QEMU-based builds (e.g., using `docker buildx`). This situation may need to be revisited at some point.
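Docker Hub normally injects these variables; for a hypothetical local test run (repository and tag below are placeholders), they can be exported by hand before invoking the hooks:
# Mimic the Docker Hub build environment for a local test (values are placeholders).
export DOCKER_REPO=index.docker.io/example/bitwarden_rs
export DOCKER_TAG=testing
export SOURCE_REPOSITORY_URL=https://github.com/dani-garcia/bitwarden_rs
./hooks/build   # hooks/push additionally requires Docker Buildx and pushes to Docker Hub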
## References
* https://docs.docker.com/docker-hub/builds/advanced/
* https://docs.docker.com/engine/reference/commandline/manifest/
* https://www.docker.com/blog/multi-arch-build-and-images-the-simple-way/
* https://success.docker.com/article/how-do-i-authenticate-with-the-v2-api

16
hooks/arches.sh Normal file
View File

@@ -0,0 +1,16 @@
# The default Debian-based images support these arches for all database backends.
arches=(
amd64
armv6
armv7
arm64
)
if [[ "${DOCKER_TAG}" == *alpine ]]; then
# The Alpine image build currently only works for certain arches.
distro_suffix=.alpine
arches=(
amd64
armv7
)
fi
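To illustrate the selection above (tag names are hypothetical), hooks/build later combines the results as docker/${arch}/Dockerfile${distro_suffix}:
# DOCKER_TAG=testing        -> arches: amd64 armv6 armv7 arm64 ; distro_suffix unset
# DOCKER_TAG=testing-alpine -> arches: amd64 armv7             ; distro_suffix=.alpine
#   e.g. docker/armv7/Dockerfile.alpine is used for the armv7 Alpine build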

45
hooks/build Executable file
View File

@@ -0,0 +1,45 @@
#!/bin/bash
echo ">>> Building images..."
source ./hooks/arches.sh
if [[ -z "${SOURCE_COMMIT}" ]]; then
# This var is typically predefined by Docker Hub, but it won't be
# when testing locally.
SOURCE_COMMIT="$(git rev-parse HEAD)"
fi
# Construct a version string in the style of `build.rs`.
GIT_EXACT_TAG="$(git describe --tags --abbrev=0 --exact-match 2>/dev/null)"
if [[ -n "${GIT_EXACT_TAG}" ]]; then
SOURCE_VERSION="${GIT_EXACT_TAG}"
else
GIT_LAST_TAG="$(git describe --tags --abbrev=0)"
SOURCE_VERSION="${GIT_LAST_TAG}-${SOURCE_COMMIT:0:8}"
fi
LABELS=(
# https://github.com/opencontainers/image-spec/blob/master/annotations.md
org.opencontainers.image.created="$(date --utc --iso-8601=seconds)"
org.opencontainers.image.documentation="https://github.com/dani-garcia/bitwarden_rs/wiki"
org.opencontainers.image.licenses="GPL-3.0-only"
org.opencontainers.image.revision="${SOURCE_COMMIT}"
org.opencontainers.image.source="${SOURCE_REPOSITORY_URL}"
org.opencontainers.image.url="https://hub.docker.com/r/${DOCKER_REPO#*/}"
org.opencontainers.image.version="${SOURCE_VERSION}"
)
LABEL_ARGS=()
for label in "${LABELS[@]}"; do
LABEL_ARGS+=(--label "${label}")
done
set -ex
for arch in "${arches[@]}"; do
docker build \
"${LABEL_ARGS[@]}" \
-t "${DOCKER_REPO}:${DOCKER_TAG}-${arch}" \
-f docker/${arch}/Dockerfile${distro_suffix} \
.
done
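As a worked example of the version-string logic above (tag and commit values are made up): on a commit that carries an exact tag, SOURCE_VERSION is just the tag; on any later untagged commit, the last tag plus the first 8 characters of the commit hash are used.
# exact tag:  GIT_EXACT_TAG=1.20.0                                -> SOURCE_VERSION=1.20.0
# untagged:   GIT_LAST_TAG=1.20.0, SOURCE_COMMIT=a1b2c3d4e5f6...  -> SOURCE_VERSION=1.20.0-a1b2c3d4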

28
hooks/pre_build Executable file
View File

@@ -0,0 +1,28 @@
#!/bin/bash
set -ex
# If requested, print some environment info for troubleshooting.
if [[ -n "${DOCKER_HUB_DEBUG}" ]]; then
id
pwd
df -h
env
docker info
docker version
fi
# Install build dependencies.
deps=(
jq
)
apt-get update
apt-get install -y "${deps[@]}"
# Docker Hub uses a shallow clone and doesn't fetch tags, which breaks some
# Git operations that we perform later, so fetch the complete history and
# tags first. Note that if the build is cached, the clone may have been
# unshallowed already; if so, unshallowing will fail, so skip it.
if [[ -f .git/shallow ]]; then
git fetch --unshallow --tags
fi

138
hooks/push Executable file
View File

@@ -0,0 +1,138 @@
#!/bin/bash
source ./hooks/arches.sh
export DOCKER_CLI_EXPERIMENTAL=enabled
# Join a list of args with a single char.
# Ref: https://stackoverflow.com/a/17841619
join() { local IFS="$1"; shift; echo "$*"; }
set -ex
echo ">>> Starting local Docker registry..."
# Docker Buildx's `docker-container` driver is needed for multi-platform
# builds, but it can't access existing images on the Docker host (like the
# cross-compiled ones we just built). Those images first need to be pushed to
# a registry -- Docker Hub could be used, but since it's not trivial to clean
# up those intermediate images on Docker Hub, it's easier to just run a local
# Docker registry, which gets cleaned up automatically once the build job ends.
#
# https://docs.docker.com/registry/deploying/
# https://hub.docker.com/_/registry
#
# Use host networking so the buildx container can access the registry via
# localhost.
#
docker run -d --name registry --network host registry:2 # defaults to port 5000
# Docker Hub sets a `DOCKER_REPO` env var with the format `index.docker.io/user/repo`.
# Strip the registry portion to construct a local repo path for use in `Dockerfile.buildx`.
LOCAL_REGISTRY="localhost:5000"
REPO="${DOCKER_REPO#*/}"
LOCAL_REPO="${LOCAL_REGISTRY}/${REPO}"
echo ">>> Pushing images to local registry..."
for arch in ${arches[@]}; do
docker_image="${DOCKER_REPO}:${DOCKER_TAG}-${arch}"
local_image="${LOCAL_REPO}:${DOCKER_TAG}-${arch}"
docker tag "${docker_image}" "${local_image}"
docker push "${local_image}"
done
echo ">>> Setting up Docker Buildx..."
# Same as earlier, use host networking so the buildx container can access the
# registry via localhost.
#
# Ref: https://github.com/docker/buildx/issues/94#issuecomment-534367714
#
docker buildx create --name builder --use --driver-opt network=host
echo ">>> Running Docker Buildx..."
tags=("${DOCKER_REPO}:${DOCKER_TAG}")
# If the Docker tag starts with a version number, assume the latest release
# is being pushed. Add an extra tag (`latest` or `alpine`, as appropriate)
# to make it easier for users to track the latest release.
if [[ "${DOCKER_TAG}" =~ ^[0-9]+\.[0-9]+\.[0-9]+ ]]; then
if [[ "${DOCKER_TAG}" == *alpine ]]; then
tags+=(${DOCKER_REPO}:alpine)
else
tags+=(${DOCKER_REPO}:latest)
fi
fi
tag_args=()
for tag in "${tags[@]}"; do
tag_args+=(--tag "${tag}")
done
# Docker Buildx takes a list of target platforms (OS/arch/variant), so map
# the arch list to a platform list (assuming the OS is always `linux`).
declare -A arch_to_platform=(
[amd64]="linux/amd64"
[armv6]="linux/arm/v6"
[armv7]="linux/arm/v7"
[arm64]="linux/arm64"
)
platforms=()
for arch in ${arches[@]}; do
platforms+=("${arch_to_platform[$arch]}")
done
platforms="$(join "," "${platforms[@]}")"
# Run the build, pushing the resulting images and multi-arch manifest list to
# Docker Hub. The Dockerfile is read from stdin to avoid sending any build
# context, which isn't needed here since the actual cross-compiled images
# have already been built.
docker buildx build \
--network host \
--build-arg LOCAL_REPO="${LOCAL_REPO}" \
--build-arg DOCKER_TAG="${DOCKER_TAG}" \
--platform "${platforms}" \
"${tag_args[@]}" \
--push \
- < ./docker/Dockerfile.buildx
# Add an extra arch-specific tag for `arm32v6`; Docker can't seem to properly
# auto-select that image on ARMv6 platforms like Raspberry Pi 1 and Zero
# (https://github.com/moby/moby/issues/41017).
#
# Note that we use `arm32v6` instead of `armv6` to be consistent with the
# existing bitwarden_rs tags, which adhere to the naming conventions of the
# Docker per-architecture repos (e.g., https://hub.docker.com/u/arm32v6).
# Unfortunately, these per-arch repo names aren't always consistent with the
# corresponding platform (OS/arch/variant) IDs, particularly in the case of
# 32-bit ARM arches (e.g., `linux/arm/v6` is used, not `linux/arm32/v6`).
#
# TODO: It looks like this issue should be fixed starting in Docker 20.10.0,
# so this step can be removed once fixed versions are in wider distribution.
#
# Tags:
#
# testing => testing-arm32v6
# testing-alpine => <ignored>
# x.y.z => x.y.z-arm32v6, latest-arm32v6
# x.y.z-alpine => <ignored>
#
if [[ "${DOCKER_TAG}" != *alpine ]]; then
image="${DOCKER_REPO}":"${DOCKER_TAG}"
# Fetch the multi-arch manifest list and find the digest of the armv6 image.
filter='.manifests|.[]|select(.platform.architecture=="arm" and .platform.variant=="v6")|.digest'
digest="$(docker manifest inspect "${image}" | jq -r "${filter}")"
# Pull the armv6 image by digest, retag it, and repush it.
docker pull "${DOCKER_REPO}"@"${digest}"
docker tag "${DOCKER_REPO}"@"${digest}" "${image}"-arm32v6
docker push "${image}"-arm32v6
if [[ "${DOCKER_TAG}" =~ ^[0-9]+\.[0-9]+\.[0-9]+ ]]; then
docker tag "${image}"-arm32v6 "${DOCKER_REPO}:latest"-arm32v6
docker push "${DOCKER_REPO}:latest"-arm32v6
fi
fi

View File

@@ -0,0 +1,3 @@
ALTER TABLE ciphers
ADD COLUMN
deleted_at DATETIME;

View File

@@ -0,0 +1,2 @@
ALTER TABLE users_collections
ADD COLUMN hide_passwords BOOLEAN NOT NULL DEFAULT FALSE;

View File

@@ -0,0 +1,13 @@
ALTER TABLE ciphers
ADD COLUMN favorite BOOLEAN NOT NULL DEFAULT FALSE;
-- Transfer favorite status for user-owned ciphers.
UPDATE ciphers
SET favorite = TRUE
WHERE EXISTS (
SELECT * FROM favorites
WHERE favorites.user_uuid = ciphers.user_uuid
AND favorites.cipher_uuid = ciphers.uuid
);
DROP TABLE favorites;

View File

@@ -0,0 +1,16 @@
CREATE TABLE favorites (
user_uuid CHAR(36) NOT NULL REFERENCES users(uuid),
cipher_uuid CHAR(36) NOT NULL REFERENCES ciphers(uuid),
PRIMARY KEY (user_uuid, cipher_uuid)
);
-- Transfer favorite status for user-owned ciphers.
INSERT INTO favorites(user_uuid, cipher_uuid)
SELECT user_uuid, uuid
FROM ciphers
WHERE favorite = TRUE
AND user_uuid IS NOT NULL;
ALTER TABLE ciphers
DROP COLUMN favorite;

View File

@@ -0,0 +1 @@
ALTER TABLE users ADD COLUMN enabled BOOLEAN NOT NULL DEFAULT 1;

View File

@@ -0,0 +1 @@
ALTER TABLE users ADD COLUMN stamp_exception TEXT DEFAULT NULL;

View File

@@ -0,0 +1 @@
DROP TABLE sends;

View File

@@ -0,0 +1,25 @@
CREATE TABLE sends (
uuid CHAR(36) NOT NULL PRIMARY KEY,
user_uuid CHAR(36) REFERENCES users (uuid),
organization_uuid CHAR(36) REFERENCES organizations (uuid),
name TEXT NOT NULL,
notes TEXT,
atype INTEGER NOT NULL,
data TEXT NOT NULL,
akey TEXT NOT NULL,
password_hash BLOB,
password_salt BLOB,
password_iter INTEGER,
max_access_count INTEGER,
access_count INTEGER NOT NULL,
creation_date DATETIME NOT NULL,
revision_date DATETIME NOT NULL,
expiration_date DATETIME,
deletion_date DATETIME NOT NULL,
disabled BOOLEAN NOT NULL
);

View File

@@ -0,0 +1,3 @@
ALTER TABLE ciphers
ADD COLUMN
deleted_at TIMESTAMP;

View File

@@ -0,0 +1,2 @@
ALTER TABLE users_collections
ADD COLUMN hide_passwords BOOLEAN NOT NULL DEFAULT FALSE;

View File

@@ -0,0 +1,13 @@
ALTER TABLE ciphers
ADD COLUMN favorite BOOLEAN NOT NULL DEFAULT FALSE;
-- Transfer favorite status for user-owned ciphers.
UPDATE ciphers
SET favorite = TRUE
WHERE EXISTS (
SELECT * FROM favorites
WHERE favorites.user_uuid = ciphers.user_uuid
AND favorites.cipher_uuid = ciphers.uuid
);
DROP TABLE favorites;

View File

@@ -0,0 +1,16 @@
CREATE TABLE favorites (
user_uuid VARCHAR(40) NOT NULL REFERENCES users(uuid),
cipher_uuid VARCHAR(40) NOT NULL REFERENCES ciphers(uuid),
PRIMARY KEY (user_uuid, cipher_uuid)
);
-- Transfer favorite status for user-owned ciphers.
INSERT INTO favorites(user_uuid, cipher_uuid)
SELECT user_uuid, uuid
FROM ciphers
WHERE favorite = TRUE
AND user_uuid IS NOT NULL;
ALTER TABLE ciphers
DROP COLUMN favorite;

View File

@@ -0,0 +1 @@
ALTER TABLE users ADD COLUMN enabled BOOLEAN NOT NULL DEFAULT true;

View File

@@ -0,0 +1 @@
ALTER TABLE users ADD COLUMN stamp_exception TEXT DEFAULT NULL;

View File

@@ -0,0 +1 @@
DROP TABLE sends;

View File

@@ -0,0 +1,25 @@
CREATE TABLE sends (
uuid CHAR(36) NOT NULL PRIMARY KEY,
user_uuid CHAR(36) REFERENCES users (uuid),
organization_uuid CHAR(36) REFERENCES organizations (uuid),
name TEXT NOT NULL,
notes TEXT,
atype INTEGER NOT NULL,
data TEXT NOT NULL,
key TEXT NOT NULL,
password_hash BYTEA,
password_salt BYTEA,
password_iter INTEGER,
max_access_count INTEGER,
access_count INTEGER NOT NULL,
creation_date TIMESTAMP NOT NULL,
revision_date TIMESTAMP NOT NULL,
expiration_date TIMESTAMP,
deletion_date TIMESTAMP NOT NULL,
disabled BOOLEAN NOT NULL
);

View File

@@ -0,0 +1 @@
ALTER TABLE sends RENAME COLUMN key TO akey;

View File

@@ -0,0 +1,3 @@
ALTER TABLE ciphers
ADD COLUMN
deleted_at DATETIME;

View File

@@ -0,0 +1,2 @@
ALTER TABLE users_collections
ADD COLUMN hide_passwords BOOLEAN NOT NULL DEFAULT 0; -- FALSE

View File

@@ -0,0 +1,13 @@
ALTER TABLE ciphers
ADD COLUMN favorite BOOLEAN NOT NULL DEFAULT 0; -- FALSE
-- Transfer favorite status for user-owned ciphers.
UPDATE ciphers
SET favorite = 1
WHERE EXISTS (
SELECT * FROM favorites
WHERE favorites.user_uuid = ciphers.user_uuid
AND favorites.cipher_uuid = ciphers.uuid
);
DROP TABLE favorites;

View File

@@ -0,0 +1,71 @@
CREATE TABLE favorites (
user_uuid TEXT NOT NULL REFERENCES users(uuid),
cipher_uuid TEXT NOT NULL REFERENCES ciphers(uuid),
PRIMARY KEY (user_uuid, cipher_uuid)
);
-- Transfer favorite status for user-owned ciphers.
INSERT INTO favorites(user_uuid, cipher_uuid)
SELECT user_uuid, uuid
FROM ciphers
WHERE favorite = 1
AND user_uuid IS NOT NULL;
-- Drop the `favorite` column from the `ciphers` table, using the 12-step
-- procedure from <https://www.sqlite.org/lang_altertable.html#altertabrename>.
-- Note that some steps aren't applicable and are omitted.
-- 1. If foreign key constraints are enabled, disable them using PRAGMA foreign_keys=OFF.
--
-- Diesel runs each migration in its own transaction. `PRAGMA foreign_keys`
-- is a no-op within a transaction, so this step must be done outside of this
-- file, before starting the Diesel migrations.
-- 2. Start a transaction.
--
-- Diesel already runs each migration in its own transaction.
-- 4. Use CREATE TABLE to construct a new table "new_X" that is in the
-- desired revised format of table X. Make sure that the name "new_X" does
-- not collide with any existing table name, of course.
CREATE TABLE new_ciphers(
uuid TEXT NOT NULL PRIMARY KEY,
created_at DATETIME NOT NULL,
updated_at DATETIME NOT NULL,
user_uuid TEXT REFERENCES users(uuid),
organization_uuid TEXT REFERENCES organizations(uuid),
atype INTEGER NOT NULL,
name TEXT NOT NULL,
notes TEXT,
fields TEXT,
data TEXT NOT NULL,
password_history TEXT,
deleted_at DATETIME
);
-- 5. Transfer content from X into new_X using a statement like:
-- INSERT INTO new_X SELECT ... FROM X.
INSERT INTO new_ciphers(uuid, created_at, updated_at, user_uuid, organization_uuid, atype,
name, notes, fields, data, password_history, deleted_at)
SELECT uuid, created_at, updated_at, user_uuid, organization_uuid, atype,
name, notes, fields, data, password_history, deleted_at
FROM ciphers;
-- 6. Drop the old table X: DROP TABLE X.
DROP TABLE ciphers;
-- 7. Change the name of new_X to X using: ALTER TABLE new_X RENAME TO X.
ALTER TABLE new_ciphers RENAME TO ciphers;
-- 11. Commit the transaction started in step 2.
-- 12. If foreign keys constraints were originally enabled, reenable them now.
--
-- `PRAGMA foreign_keys` is scoped to a database connection, and Diesel
-- migrations are run in a separate database connection that is closed once
-- the migrations finish.

View File

@@ -0,0 +1 @@
ALTER TABLE users ADD COLUMN enabled BOOLEAN NOT NULL DEFAULT 1;

View File

@@ -0,0 +1 @@
ALTER TABLE users ADD COLUMN stamp_exception TEXT DEFAULT NULL;

View File

@@ -0,0 +1 @@
DROP TABLE sends;

View File

@@ -0,0 +1,25 @@
CREATE TABLE sends (
uuid TEXT NOT NULL PRIMARY KEY,
user_uuid TEXT REFERENCES users (uuid),
organization_uuid TEXT REFERENCES organizations (uuid),
name TEXT NOT NULL,
notes TEXT,
atype INTEGER NOT NULL,
data TEXT NOT NULL,
key TEXT NOT NULL,
password_hash BLOB,
password_salt BLOB,
password_iter INTEGER,
max_access_count INTEGER,
access_count INTEGER NOT NULL,
creation_date DATETIME NOT NULL,
revision_date DATETIME NOT NULL,
expiration_date DATETIME,
deletion_date DATETIME NOT NULL,
disabled BOOLEAN NOT NULL
);

View File

@@ -0,0 +1 @@
ALTER TABLE sends RENAME COLUMN key TO akey;

View File

@@ -1 +1 @@
nightly-2020-03-09 nightly-2021-02-22

View File

@@ -1,46 +1,75 @@
use once_cell::sync::Lazy; use once_cell::sync::Lazy;
use serde::de::DeserializeOwned;
use serde_json::Value; use serde_json::Value;
use std::process::Command; use std::{env, process::Command, time::Duration};
use rocket::http::{Cookie, Cookies, SameSite}; use reqwest::{blocking::Client, header::USER_AGENT};
use rocket::request::{self, FlashMessage, Form, FromRequest, Request}; use rocket::{
use rocket::response::{content::Html, Flash, Redirect}; http::{Cookie, Cookies, SameSite},
use rocket::{Outcome, Route}; request::{self, FlashMessage, Form, FromRequest, Outcome, Request},
response::{content::Html, Flash, Redirect},
Route,
};
use rocket_contrib::json::Json; use rocket_contrib::json::Json;
use crate::api::{ApiResult, EmptyResult, JsonResult}; use crate::{
use crate::auth::{decode_admin, encode_jwt, generate_admin_claims, ClientIp}; api::{ApiResult, EmptyResult, NumberOrString},
use crate::config::ConfigBuilder; auth::{decode_admin, encode_jwt, generate_admin_claims, ClientIp},
use crate::db::{backup_database, models::*, DbConn}; config::ConfigBuilder,
use crate::error::Error; db::{backup_database, get_sql_server_version, models::*, DbConn, DbConnType},
use crate::mail; error::{Error, MapResult},
use crate::CONFIG; mail,
util::{format_naive_datetime_local, get_display_size, is_running_in_docker},
CONFIG,
};
pub fn routes() -> Vec<Route> { pub fn routes() -> Vec<Route> {
if CONFIG.admin_token().is_none() && !CONFIG.disable_admin_token() { if !CONFIG.disable_admin_token() && !CONFIG.is_admin_token_set() {
return routes![admin_disabled]; return routes![admin_disabled];
} }
routes![ routes![
admin_login, admin_login,
get_users, get_users_json,
post_admin_login, post_admin_login,
admin_page, admin_page,
invite_user, invite_user,
logout, logout,
delete_user, delete_user,
deauth_user, deauth_user,
disable_user,
enable_user,
remove_2fa, remove_2fa,
update_user_org_type,
update_revision_users, update_revision_users,
post_config, post_config,
delete_config, delete_config,
backup_db, backup_db,
test_smtp, test_smtp,
users_overview,
organizations_overview,
delete_organization,
diagnostics,
get_diagnostics_config
] ]
} }
static CAN_BACKUP: Lazy<bool> = static DB_TYPE: Lazy<&str> = Lazy::new(|| {
Lazy::new(|| cfg!(feature = "sqlite") && Command::new("sqlite3").arg("-version").status().is_ok()); DbConnType::from_url(&CONFIG.database_url())
.map(|t| match t {
DbConnType::sqlite => "SQLite",
DbConnType::mysql => "MySQL",
DbConnType::postgresql => "PostgreSQL",
})
.unwrap_or("Unknown")
});
static CAN_BACKUP: Lazy<bool> = Lazy::new(|| {
DbConnType::from_url(&CONFIG.database_url())
.map(|t| t == DbConnType::sqlite)
.unwrap_or(false)
&& Command::new("sqlite3").arg("-version").status().is_ok()
});
#[get("/")] #[get("/")]
fn admin_disabled() -> &'static str { fn admin_disabled() -> &'static str {
@@ -51,12 +80,64 @@ const COOKIE_NAME: &str = "BWRS_ADMIN";
const ADMIN_PATH: &str = "/admin"; const ADMIN_PATH: &str = "/admin";
const BASE_TEMPLATE: &str = "admin/base"; const BASE_TEMPLATE: &str = "admin/base";
const VERSION: Option<&str> = option_env!("GIT_VERSION"); const VERSION: Option<&str> = option_env!("BWRS_VERSION");
fn admin_path() -> String { fn admin_path() -> String {
format!("{}{}", CONFIG.domain_path(), ADMIN_PATH) format!("{}{}", CONFIG.domain_path(), ADMIN_PATH)
} }
struct Referer(Option<String>);
impl<'a, 'r> FromRequest<'a, 'r> for Referer {
type Error = ();
fn from_request(request: &'a Request<'r>) -> request::Outcome<Self, Self::Error> {
Outcome::Success(Referer(request.headers().get_one("Referer").map(str::to_string)))
}
}
#[derive(Debug)]
struct IpHeader(Option<String>);
impl<'a, 'r> FromRequest<'a, 'r> for IpHeader {
type Error = ();
fn from_request(req: &'a Request<'r>) -> Outcome<Self, Self::Error> {
if req.headers().get_one(&CONFIG.ip_header()).is_some() {
Outcome::Success(IpHeader(Some(CONFIG.ip_header())))
} else if req.headers().get_one("X-Client-IP").is_some() {
Outcome::Success(IpHeader(Some(String::from("X-Client-IP"))))
} else if req.headers().get_one("X-Real-IP").is_some() {
Outcome::Success(IpHeader(Some(String::from("X-Real-IP"))))
} else if req.headers().get_one("X-Forwarded-For").is_some() {
Outcome::Success(IpHeader(Some(String::from("X-Forwarded-For"))))
} else {
Outcome::Success(IpHeader(None))
}
}
}
/// Used for `Location` response headers, which must specify an absolute URI
/// (see https://tools.ietf.org/html/rfc2616#section-14.30).
fn admin_url(referer: Referer) -> String {
// If we get a referer, use that to make it work when DOMAIN is not set
if let Some(mut referer) = referer.0 {
if let Some(start_index) = referer.find(ADMIN_PATH) {
referer.truncate(start_index + ADMIN_PATH.len());
return referer;
}
}
if CONFIG.domain_set() {
// Don't use CONFIG.domain() directly, since the user may want to keep a
// trailing slash there, particularly when running under a subpath.
format!("{}{}{}", CONFIG.domain_origin(), CONFIG.domain_path(), ADMIN_PATH)
} else {
// Last case, when neither referer nor domain is set; technically invalid, but better than nothing
ADMIN_PATH.to_string()
}
}
#[get("/", rank = 2)] #[get("/", rank = 2)]
fn admin_login(flash: Option<FlashMessage>) -> ApiResult<Html<String>> { fn admin_login(flash: Option<FlashMessage>) -> ApiResult<Html<String>> {
// If there is an error, show it // If there is an error, show it
@@ -74,14 +155,19 @@ struct LoginForm {
} }
#[post("/", data = "<data>")] #[post("/", data = "<data>")]
fn post_admin_login(data: Form<LoginForm>, mut cookies: Cookies, ip: ClientIp) -> Result<Redirect, Flash<Redirect>> { fn post_admin_login(
data: Form<LoginForm>,
mut cookies: Cookies,
ip: ClientIp,
referer: Referer,
) -> Result<Redirect, Flash<Redirect>> {
let data = data.into_inner(); let data = data.into_inner();
// If the token is invalid, redirect to login page // If the token is invalid, redirect to login page
if !_validate_token(&data.token) { if !_validate_token(&data.token) {
error!("Invalid admin token. IP: {}", ip.ip); error!("Invalid admin token. IP: {}", ip.ip);
Err(Flash::error( Err(Flash::error(
Redirect::to(admin_path()), Redirect::to(admin_url(referer)),
"Invalid admin token, please try again.", "Invalid admin token, please try again.",
)) ))
} else { } else {
@@ -97,7 +183,7 @@ fn post_admin_login(data: Form<LoginForm>, mut cookies: Cookies, ip: ClientIp) -
.finish(); .finish();
cookies.add(cookie); cookies.add(cookie);
Ok(Redirect::to(admin_path())) Ok(Redirect::to(admin_url(referer)))
} }
} }
@@ -112,7 +198,9 @@ fn _validate_token(token: &str) -> bool {
struct AdminTemplateData { struct AdminTemplateData {
page_content: String, page_content: String,
version: Option<&'static str>, version: Option<&'static str>,
users: Vec<Value>, users: Option<Vec<Value>>,
organizations: Option<Vec<Value>>,
diagnostics: Option<Value>,
config: Value, config: Value,
can_backup: bool, can_backup: bool,
logged_in: bool, logged_in: bool,
@@ -120,15 +208,59 @@ struct AdminTemplateData {
} }
impl AdminTemplateData { impl AdminTemplateData {
fn new(users: Vec<Value>) -> Self { fn new() -> Self {
Self { Self {
page_content: String::from("admin/page"), page_content: String::from("admin/settings"),
version: VERSION, version: VERSION,
users,
config: CONFIG.prepare_json(), config: CONFIG.prepare_json(),
can_backup: *CAN_BACKUP, can_backup: *CAN_BACKUP,
logged_in: true, logged_in: true,
urlpath: CONFIG.domain_path(), urlpath: CONFIG.domain_path(),
users: None,
organizations: None,
diagnostics: None,
}
}
fn users(users: Vec<Value>) -> Self {
Self {
page_content: String::from("admin/users"),
version: VERSION,
users: Some(users),
config: CONFIG.prepare_json(),
can_backup: *CAN_BACKUP,
logged_in: true,
urlpath: CONFIG.domain_path(),
organizations: None,
diagnostics: None,
}
}
fn organizations(organizations: Vec<Value>) -> Self {
Self {
page_content: String::from("admin/organizations"),
version: VERSION,
organizations: Some(organizations),
config: CONFIG.prepare_json(),
can_backup: *CAN_BACKUP,
logged_in: true,
urlpath: CONFIG.domain_path(),
users: None,
diagnostics: None,
}
}
fn diagnostics(diagnostics: Value) -> Self {
Self {
page_content: String::from("admin/diagnostics"),
version: VERSION,
organizations: None,
config: CONFIG.prepare_json(),
can_backup: *CAN_BACKUP,
logged_in: true,
urlpath: CONFIG.domain_path(),
users: None,
diagnostics: Some(diagnostics),
} }
} }
@@ -138,11 +270,8 @@ impl AdminTemplateData {
} }
#[get("/", rank = 1)] #[get("/", rank = 1)]
fn admin_page(_token: AdminToken, conn: DbConn) -> ApiResult<Html<String>> { fn admin_page(_token: AdminToken, _conn: DbConn) -> ApiResult<Html<String>> {
let users = User::get_all(&conn); let text = AdminTemplateData::new().render()?;
let users_json: Vec<Value> = users.iter().map(|u| u.to_json(&conn)).collect();
let text = AdminTemplateData::new(users_json).render()?;
Ok(Html(text)) Ok(Html(text))
} }
@@ -174,69 +303,287 @@ fn invite_user(data: Json<InviteData>, _token: AdminToken, conn: DbConn) -> Empt
#[post("/test/smtp", data = "<data>")] #[post("/test/smtp", data = "<data>")]
fn test_smtp(data: Json<InviteData>, _token: AdminToken) -> EmptyResult { fn test_smtp(data: Json<InviteData>, _token: AdminToken) -> EmptyResult {
let data: InviteData = data.into_inner(); let data: InviteData = data.into_inner();
let email = data.email.clone();
if CONFIG.mail_enabled() { if CONFIG.mail_enabled() {
mail::send_test(&email) mail::send_test(&data.email)
} else { } else {
err!("Mail is not enabled") err!("Mail is not enabled")
} }
} }
#[get("/logout")] #[get("/logout")]
fn logout(mut cookies: Cookies) -> Result<Redirect, ()> { fn logout(mut cookies: Cookies, referer: Referer) -> Redirect {
cookies.remove(Cookie::named(COOKIE_NAME)); cookies.remove(Cookie::named(COOKIE_NAME));
Ok(Redirect::to(admin_path())) Redirect::to(admin_url(referer))
} }
#[get("/users")] #[get("/users")]
fn get_users(_token: AdminToken, conn: DbConn) -> JsonResult { fn get_users_json(_token: AdminToken, conn: DbConn) -> Json<Value> {
let users = User::get_all(&conn); let users = User::get_all(&conn);
let users_json: Vec<Value> = users.iter().map(|u| u.to_json(&conn)).collect(); let users_json: Vec<Value> = users.iter().map(|u| u.to_json(&conn)).collect();
Ok(Json(Value::Array(users_json))) Json(Value::Array(users_json))
}
#[get("/users/overview")]
fn users_overview(_token: AdminToken, conn: DbConn) -> ApiResult<Html<String>> {
let users = User::get_all(&conn);
let dt_fmt = "%Y-%m-%d %H:%M:%S %Z";
let users_json: Vec<Value> = users.iter()
.map(|u| {
let mut usr = u.to_json(&conn);
usr["cipher_count"] = json!(Cipher::count_owned_by_user(&u.uuid, &conn));
usr["attachment_count"] = json!(Attachment::count_by_user(&u.uuid, &conn));
usr["attachment_size"] = json!(get_display_size(Attachment::size_by_user(&u.uuid, &conn) as i32));
usr["user_enabled"] = json!(u.enabled);
usr["created_at"] = json!(format_naive_datetime_local(&u.created_at, dt_fmt));
usr["last_active"] = match u.last_active(&conn) {
Some(dt) => json!(format_naive_datetime_local(&dt, dt_fmt)),
None => json!("Never")
};
usr
})
.collect();
let text = AdminTemplateData::users(users_json).render()?;
Ok(Html(text))
} }
#[post("/users/<uuid>/delete")] #[post("/users/<uuid>/delete")]
fn delete_user(uuid: String, _token: AdminToken, conn: DbConn) -> EmptyResult { fn delete_user(uuid: String, _token: AdminToken, conn: DbConn) -> EmptyResult {
let user = match User::find_by_uuid(&uuid, &conn) { let user = User::find_by_uuid(&uuid, &conn).map_res("User doesn't exist")?;
Some(user) => user,
None => err!("User doesn't exist"),
};
user.delete(&conn) user.delete(&conn)
} }
#[post("/users/<uuid>/deauth")] #[post("/users/<uuid>/deauth")]
fn deauth_user(uuid: String, _token: AdminToken, conn: DbConn) -> EmptyResult { fn deauth_user(uuid: String, _token: AdminToken, conn: DbConn) -> EmptyResult {
let mut user = match User::find_by_uuid(&uuid, &conn) { let mut user = User::find_by_uuid(&uuid, &conn).map_res("User doesn't exist")?;
Some(user) => user,
None => err!("User doesn't exist"),
};
Device::delete_all_by_user(&user.uuid, &conn)?; Device::delete_all_by_user(&user.uuid, &conn)?;
user.reset_security_stamp(); user.reset_security_stamp();
user.save(&conn) user.save(&conn)
} }
#[post("/users/<uuid>/disable")]
fn disable_user(uuid: String, _token: AdminToken, conn: DbConn) -> EmptyResult {
let mut user = User::find_by_uuid(&uuid, &conn).map_res("User doesn't exist")?;
Device::delete_all_by_user(&user.uuid, &conn)?;
user.reset_security_stamp();
user.enabled = false;
user.save(&conn)
}
#[post("/users/<uuid>/enable")]
fn enable_user(uuid: String, _token: AdminToken, conn: DbConn) -> EmptyResult {
let mut user = User::find_by_uuid(&uuid, &conn).map_res("User doesn't exist")?;
user.enabled = true;
user.save(&conn)
}
#[post("/users/<uuid>/remove-2fa")] #[post("/users/<uuid>/remove-2fa")]
fn remove_2fa(uuid: String, _token: AdminToken, conn: DbConn) -> EmptyResult { fn remove_2fa(uuid: String, _token: AdminToken, conn: DbConn) -> EmptyResult {
let mut user = match User::find_by_uuid(&uuid, &conn) { let mut user = User::find_by_uuid(&uuid, &conn).map_res("User doesn't exist")?;
Some(user) => user,
None => err!("User doesn't exist"),
};
TwoFactor::delete_all_by_user(&user.uuid, &conn)?; TwoFactor::delete_all_by_user(&user.uuid, &conn)?;
user.totp_recover = None; user.totp_recover = None;
user.save(&conn) user.save(&conn)
} }
#[derive(Deserialize, Debug)]
struct UserOrgTypeData {
user_type: NumberOrString,
user_uuid: String,
org_uuid: String,
}
#[post("/users/org_type", data = "<data>")]
fn update_user_org_type(data: Json<UserOrgTypeData>, _token: AdminToken, conn: DbConn) -> EmptyResult {
let data: UserOrgTypeData = data.into_inner();
let mut user_to_edit = match UserOrganization::find_by_user_and_org(&data.user_uuid, &data.org_uuid, &conn) {
Some(user) => user,
None => err!("The specified user isn't member of the organization"),
};
let new_type = match UserOrgType::from_str(&data.user_type.into_string()) {
Some(new_type) => new_type as i32,
None => err!("Invalid type"),
};
if user_to_edit.atype == UserOrgType::Owner && new_type != UserOrgType::Owner {
// Removing owner permission; check that there is at least one other owner
let num_owners = UserOrganization::find_by_org_and_type(&data.org_uuid, UserOrgType::Owner as i32, &conn).len();
if num_owners <= 1 {
err!("Can't change the type of the last owner")
}
}
user_to_edit.atype = new_type as i32;
user_to_edit.save(&conn)
}
#[post("/users/update_revision")] #[post("/users/update_revision")]
fn update_revision_users(_token: AdminToken, conn: DbConn) -> EmptyResult { fn update_revision_users(_token: AdminToken, conn: DbConn) -> EmptyResult {
User::update_all_revisions(&conn) User::update_all_revisions(&conn)
} }
#[get("/organizations/overview")]
fn organizations_overview(_token: AdminToken, conn: DbConn) -> ApiResult<Html<String>> {
let organizations = Organization::get_all(&conn);
let organizations_json: Vec<Value> = organizations.iter()
.map(|o| {
let mut org = o.to_json();
org["user_count"] = json!(UserOrganization::count_by_org(&o.uuid, &conn));
org["cipher_count"] = json!(Cipher::count_by_org(&o.uuid, &conn));
org["attachment_count"] = json!(Attachment::count_by_org(&o.uuid, &conn));
org["attachment_size"] = json!(get_display_size(Attachment::size_by_org(&o.uuid, &conn) as i32));
org
})
.collect();
let text = AdminTemplateData::organizations(organizations_json).render()?;
Ok(Html(text))
}
#[post("/organizations/<uuid>/delete")]
fn delete_organization(uuid: String, _token: AdminToken, conn: DbConn) -> EmptyResult {
let org = Organization::find_by_uuid(&uuid, &conn).map_res("Organization doesn't exist")?;
org.delete(&conn)
}
#[derive(Deserialize)]
struct WebVaultVersion {
version: String,
}
#[derive(Deserialize)]
struct GitRelease {
tag_name: String,
}
#[derive(Deserialize)]
struct GitCommit {
sha: String,
}
fn get_github_api<T: DeserializeOwned>(url: &str) -> Result<T, Error> {
let github_api = Client::builder().build()?;
Ok(github_api
.get(url)
.timeout(Duration::from_secs(10))
.header(USER_AGENT, "Bitwarden_RS")
.send()?
.error_for_status()?
.json::<T>()?)
}
fn has_http_access() -> bool {
let http_access = Client::builder().build().unwrap();
match http_access
.head("https://github.com/dani-garcia/bitwarden_rs")
.timeout(Duration::from_secs(10))
.header(USER_AGENT, "Bitwarden_RS")
.send()
{
Ok(r) => r.status().is_success(),
_ => false,
}
}
#[get("/diagnostics")]
fn diagnostics(_token: AdminToken, ip_header: IpHeader, conn: DbConn) -> ApiResult<Html<String>> {
use crate::util::read_file_string;
use chrono::prelude::*;
use std::net::ToSocketAddrs;
// Get current running versions
let vault_version_path = format!("{}/{}", CONFIG.web_vault_folder(), "version.json");
let vault_version_str = read_file_string(&vault_version_path)?;
let web_vault_version: WebVaultVersion = serde_json::from_str(&vault_version_str)?;
// Execute some environment checks
let running_within_docker = is_running_in_docker();
let has_http_access = has_http_access();
let uses_proxy = env::var_os("HTTP_PROXY").is_some()
|| env::var_os("http_proxy").is_some()
|| env::var_os("HTTPS_PROXY").is_some()
|| env::var_os("https_proxy").is_some();
// Check if we are able to resolve DNS entries
let dns_resolved = match ("github.com", 0).to_socket_addrs().map(|mut i| i.next()) {
Ok(Some(a)) => a.ip().to_string(),
_ => "Could not resolve domain name.".to_string(),
};
// If the HTTP check failed, do not even attempt to check for new versions, since we were not able to connect to github.com anyway.
// TODO: Maybe we need to cache this using a LazyStatic or something. GitHub only allows 60 requests per hour, and we use 3 here already.
let (latest_release, latest_commit, latest_web_build) = if has_http_access {
(
match get_github_api::<GitRelease>("https://api.github.com/repos/dani-garcia/bitwarden_rs/releases/latest") {
Ok(r) => r.tag_name,
_ => "-".to_string(),
},
match get_github_api::<GitCommit>("https://api.github.com/repos/dani-garcia/bitwarden_rs/commits/master") {
Ok(mut c) => {
c.sha.truncate(8);
c.sha
}
_ => "-".to_string(),
},
// Do not fetch the web-vault version when running within Docker.
// The web-vault version is embedded within the container itself, and should not be updated manually.
if running_within_docker {
"-".to_string()
} else {
match get_github_api::<GitRelease>("https://api.github.com/repos/dani-garcia/bw_web_builds/releases/latest") {
Ok(r) => r.tag_name.trim_start_matches('v').to_string(),
_ => "-".to_string(),
}
},
)
} else {
("-".to_string(), "-".to_string(), "-".to_string())
};
let ip_header_name = match &ip_header.0 {
Some(h) => h,
_ => ""
};
let diagnostics_json = json!({
"dns_resolved": dns_resolved,
"web_vault_version": web_vault_version.version,
"latest_release": latest_release,
"latest_commit": latest_commit,
"latest_web_build": latest_web_build,
"running_within_docker": running_within_docker,
"has_http_access": has_http_access,
"ip_header_exists": &ip_header.0.is_some(),
"ip_header_match": ip_header_name == CONFIG.ip_header(),
"ip_header_name": ip_header_name,
"ip_header_config": &CONFIG.ip_header(),
"uses_proxy": uses_proxy,
"db_type": *DB_TYPE,
"db_version": get_sql_server_version(&conn),
"admin_url": format!("{}/diagnostics", admin_url(Referer(None))),
"server_time": Utc::now().format("%Y-%m-%d %H:%M:%S UTC").to_string(), // Run the date/time check as the last item to minimize the difference
});
let text = AdminTemplateData::diagnostics(diagnostics_json).render()?;
Ok(Html(text))
}
#[get("/diagnostics/config")]
fn get_diagnostics_config(_token: AdminToken) -> Json<Value> {
let support_json = CONFIG.get_support_json();
Json(support_json)
}
#[post("/config", data = "<data>")] #[post("/config", data = "<data>")]
fn post_config(data: Json<ConfigBuilder>, _token: AdminToken) -> EmptyResult { fn post_config(data: Json<ConfigBuilder>, _token: AdminToken) -> EmptyResult {
let data: ConfigBuilder = data.into_inner(); let data: ConfigBuilder = data.into_inner();

View File

@@ -1,19 +1,16 @@
use chrono::Utc; use chrono::Utc;
use rocket_contrib::json::Json; use rocket_contrib::json::Json;
use serde_json::Value;
use crate::db::models::*; use crate::{
use crate::db::DbConn; api::{EmptyResult, JsonResult, JsonUpcase, Notify, NumberOrString, PasswordData, UpdateType},
auth::{decode_delete, decode_invite, decode_verify_email, Headers},
crypto,
db::{models::*, DbConn},
mail, CONFIG,
};
use crate::api::{EmptyResult, JsonResult, JsonUpcase, Notify, NumberOrString, PasswordData, UpdateType}; pub fn routes() -> Vec<rocket::Route> {
use crate::auth::{decode_delete, decode_invite, decode_verify_email, Headers};
use crate::crypto;
use crate::mail;
use crate::CONFIG;
use rocket::Route;
pub fn routes() -> Vec<Route> {
routes![ routes![
register, register,
profile, profile,
@@ -36,6 +33,7 @@ pub fn routes() -> Vec<Route> {
revision_date, revision_date,
password_hint, password_hint,
prelogin, prelogin,
verify_password,
] ]
} }
@@ -68,7 +66,7 @@ fn register(data: JsonUpcase<RegisterData>, conn: DbConn) -> EmptyResult {
let mut user = match User::find_by_mail(&data.Email, &conn) { let mut user = match User::find_by_mail(&data.Email, &conn) {
Some(user) => { Some(user) => {
if !user.password_hash.is_empty() { if !user.password_hash.is_empty() {
if CONFIG.signups_allowed() { if CONFIG.is_signup_allowed(&data.Email) {
err!("User already exists") err!("User already exists")
} else { } else {
err!("Registration not allowed or user already exists") err!("Registration not allowed or user already exists")
@@ -89,14 +87,17 @@ fn register(data: JsonUpcase<RegisterData>, conn: DbConn) -> EmptyResult {
} }
user user
} else if CONFIG.signups_allowed() { } else if CONFIG.is_signup_allowed(&data.Email) {
err!("Account with this email already exists") err!("Account with this email already exists")
} else { } else {
err!("Registration not allowed or user already exists") err!("Registration not allowed or user already exists")
} }
} }
None => { None => {
if CONFIG.signups_allowed() || Invitation::take(&data.Email, &conn) || CONFIG.can_signup_user(&data.Email) { // Order is important here; the invitation check must come first
// because the bitwarden_rs admin can invite anyone, regardless
// of other signup restrictions.
if Invitation::take(&data.Email, &conn) || CONFIG.is_signup_allowed(&data.Email) {
User::new(data.Email.clone()) User::new(data.Email.clone())
} else { } else {
err!("Registration not allowed or user already exists") err!("Registration not allowed or user already exists")
@@ -115,7 +116,7 @@ fn register(data: JsonUpcase<RegisterData>, conn: DbConn) -> EmptyResult {
user.client_kdf_type = client_kdf_type; user.client_kdf_type = client_kdf_type;
} }
user.set_password(&data.MasterPasswordHash); user.set_password(&data.MasterPasswordHash, None);
user.akey = data.Key; user.akey = data.Key;
// Add extra fields if present // Add extra fields if present
@@ -139,10 +140,8 @@ fn register(data: JsonUpcase<RegisterData>, conn: DbConn) -> EmptyResult {
} }
user.last_verifying_at = Some(user.created_at); user.last_verifying_at = Some(user.created_at);
} else { } else if let Err(e) = mail::send_welcome(&user.email) {
if let Err(e) = mail::send_welcome(&user.email) { error!("Error sending welcome email: {:#?}", e);
error!("Error sending welcome email: {:#?}", e);
}
} }
} }
@@ -150,8 +149,8 @@ fn register(data: JsonUpcase<RegisterData>, conn: DbConn) -> EmptyResult {
} }
#[get("/accounts/profile")] #[get("/accounts/profile")]
fn profile(headers: Headers, conn: DbConn) -> JsonResult { fn profile(headers: Headers, conn: DbConn) -> Json<Value> {
Ok(Json(headers.user.to_json(&conn))) Json(headers.user.to_json(&conn))
} }
#[derive(Deserialize, Debug)] #[derive(Deserialize, Debug)]
@@ -207,7 +206,12 @@ fn post_keys(data: JsonUpcase<KeysData>, headers: Headers, conn: DbConn) -> Json
user.public_key = Some(data.PublicKey); user.public_key = Some(data.PublicKey);
user.save(&conn)?; user.save(&conn)?;
Ok(Json(user.to_json(&conn)))
Ok(Json(json!({
"PrivateKey": user.private_key,
"PublicKey": user.public_key,
"Object":"keys"
})))
} }
#[derive(Deserialize)] #[derive(Deserialize)]
@@ -227,7 +231,7 @@ fn post_password(data: JsonUpcase<ChangePassData>, headers: Headers, conn: DbCon
err!("Invalid password") err!("Invalid password")
} }
user.set_password(&data.NewMasterPasswordHash); user.set_password(&data.NewMasterPasswordHash, Some("post_rotatekey"));
user.akey = data.Key; user.akey = data.Key;
user.save(&conn) user.save(&conn)
} }
@@ -254,7 +258,7 @@ fn post_kdf(data: JsonUpcase<ChangeKdfData>, headers: Headers, conn: DbConn) ->
user.client_kdf_iter = data.KdfIterations; user.client_kdf_iter = data.KdfIterations;
user.client_kdf_type = data.Kdf; user.client_kdf_type = data.Kdf;
user.set_password(&data.NewMasterPasswordHash); user.set_password(&data.NewMasterPasswordHash, None);
user.akey = data.Key; user.akey = data.Key;
user.save(&conn) user.save(&conn)
} }
@@ -333,6 +337,7 @@ fn post_rotatekey(data: JsonUpcase<KeyData>, headers: Headers, conn: DbConn, nt:
user.akey = data.Key; user.akey = data.Key;
user.private_key = Some(data.PrivateKey); user.private_key = Some(data.PrivateKey);
user.reset_security_stamp(); user.reset_security_stamp();
user.reset_stamp_exception();
user.save(&conn) user.save(&conn)
} }
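`set_password` now takes a second argument (None in most handlers, Some("post_rotatekey") in the password-change handler above), and `post_rotatekey` additionally calls `reset_stamp_exception()`. The User model side isn't shown in this diff, so the following is only a speculative sketch of what such a "stamp exception" mechanism could look like: one follow-up route is still allowed to pass the security-stamp check after the stamp changes, so the client that just changed the password isn't logged out before it can finish rotating the key. All names here are hypothetical.

// Speculative sketch only; the real field and struct names in the User model
// may differ and are not part of this diff.
#[allow(dead_code)]
struct UserStampException {
    route: String,          // e.g. "post_rotatekey"
    security_stamp: String, // the old stamp that this one route may still present
}

struct UserSketch {
    security_stamp: String,
    stamp_exception: Option<UserStampException>,
}

impl UserSketch {
    // set_password(_, Some(route)): rotate the stamp but remember that one
    // follow-up route may still authenticate with the previous stamp.
    fn set_password(&mut self, _hash: &str, allow_next_route: Option<&str>) {
        let old_stamp = self.security_stamp.clone();
        self.security_stamp = "new-stamp".to_string(); // would be a fresh UUID
        self.stamp_exception = allow_next_route.map(|route| UserStampException {
            route: route.to_string(),
            security_stamp: old_stamp,
        });
    }

    // post_rotatekey calls this once the key rotation is done.
    fn reset_stamp_exception(&mut self) {
        self.stamp_exception = None;
    }
}

fn main() {
    let mut user = UserSketch { security_stamp: "old-stamp".into(), stamp_exception: None };
    user.set_password("<hash>", Some("post_rotatekey"));
    assert!(user.stamp_exception.is_some());
    user.reset_stamp_exception();
    assert!(user.stamp_exception.is_none());
}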
@@ -371,8 +376,8 @@ fn post_email_token(data: JsonUpcase<EmailTokenData>, headers: Headers, conn: Db
err!("Email already in use"); err!("Email already in use");
} }
if !CONFIG.signups_allowed() && !CONFIG.can_signup_user(&data.NewEmail) { if !CONFIG.is_email_domain_allowed(&data.NewEmail) {
err!("Email cannot be changed to this address"); err!("Email domain not allowed");
} }
let token = crypto::generate_token(6)?; let token = crypto::generate_token(6)?;
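The old check combined `signups_allowed()` with `can_signup_user()`; the new `is_email_domain_allowed()` only looks at the address's domain. Its implementation isn't part of this diff, so the following is just a plausible sketch of such an allowlist check, assuming an empty allowlist accepts every domain:

// Plausible sketch; the real CONFIG.is_email_domain_allowed() is not shown in
// this diff and may behave differently.
fn is_email_domain_allowed(email: &str, allowed_domains: &[String]) -> bool {
    if allowed_domains.is_empty() {
        // No allowlist configured: accept any domain.
        return true;
    }
    match email.split_once('@') {
        Some((_, domain)) => allowed_domains.iter().any(|d| d.eq_ignore_ascii_case(domain)),
        None => false,
    }
}

fn main() {
    let allowed = vec!["example.com".to_string()];
    assert!(is_email_domain_allowed("user@example.com", &allowed));
    assert!(!is_email_domain_allowed("user@elsewhere.net", &allowed));
}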
@@ -440,7 +445,7 @@ fn post_email(data: JsonUpcase<ChangeEmailData>, headers: Headers, conn: DbConn)
user.email_new = None; user.email_new = None;
user.email_new_token = None; user.email_new_token = None;
user.set_password(&data.NewMasterPasswordHash); user.set_password(&data.NewMasterPasswordHash, None);
user.akey = data.Key; user.akey = data.Key;
user.save(&conn) user.save(&conn)
@@ -455,7 +460,7 @@ fn post_verify_email(headers: Headers, _conn: DbConn) -> EmptyResult {
} }
if let Err(e) = mail::send_verify_email(&user.email, &user.uuid) { if let Err(e) = mail::send_verify_email(&user.email, &user.uuid) {
error!("Error sending delete account email: {:#?}", e); error!("Error sending verify_email email: {:#?}", e);
} }
Ok(()) Ok(())
@@ -606,7 +611,7 @@ struct PreloginData {
} }
#[post("/accounts/prelogin", data = "<data>")] #[post("/accounts/prelogin", data = "<data>")]
fn prelogin(data: JsonUpcase<PreloginData>, conn: DbConn) -> JsonResult { fn prelogin(data: JsonUpcase<PreloginData>, conn: DbConn) -> Json<Value> {
let data: PreloginData = data.into_inner().data; let data: PreloginData = data.into_inner().data;
let (kdf_type, kdf_iter) = match User::find_by_mail(&data.Email, &conn) { let (kdf_type, kdf_iter) = match User::find_by_mail(&data.Email, &conn) {
@@ -614,8 +619,25 @@ fn prelogin(data: JsonUpcase<PreloginData>, conn: DbConn) -> JsonResult {
None => (User::CLIENT_KDF_TYPE_DEFAULT, User::CLIENT_KDF_ITER_DEFAULT), None => (User::CLIENT_KDF_TYPE_DEFAULT, User::CLIENT_KDF_ITER_DEFAULT),
}; };
Ok(Json(json!({ Json(json!({
"Kdf": kdf_type, "Kdf": kdf_type,
"KdfIterations": kdf_iter "KdfIterations": kdf_iter
}))) }))
}
#[derive(Deserialize)]
#[allow(non_snake_case)]
struct VerifyPasswordData {
MasterPasswordHash: String,
}
#[post("/accounts/verify-password", data = "<data>")]
fn verify_password(data: JsonUpcase<VerifyPasswordData>, headers: Headers, _conn: DbConn) -> EmptyResult {
let data: VerifyPasswordData = data.into_inner().data;
let user = headers.user;
if !user.check_valid_password(&data.MasterPasswordHash) {
err!("Invalid password")
}
Ok(())
} }
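A hedged usage sketch for the new endpoint, calling it the way a client might. The route path and the MasterPasswordHash field name come from the handler above, and reqwest is already used elsewhere in this codebase; the URL, token, and hash values are placeholders.

// Usage sketch only, not part of the diff.
use reqwest::blocking::Client;
use serde_json::json;

fn verify_master_password() -> Result<(), Box<dyn std::error::Error>> {
    let resp = Client::new()
        .post("https://vault.example.com/api/accounts/verify-password")
        .bearer_auth("<access token>")
        .json(&json!({ "MasterPasswordHash": "<base64 master password hash>" }))
        .send()?;
    // The handler returns an empty 200 on success and an error message otherwise.
    println!("status: {}", resp.status());
    Ok(())
}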

View File

@@ -1,28 +1,33 @@
use std::collections::{HashMap, HashSet}; use std::collections::{HashMap, HashSet};
use std::path::Path; use std::path::Path;
use rocket::http::ContentType; use chrono::{NaiveDateTime, Utc};
use rocket::{request::Form, Data, Route}; use rocket::{http::ContentType, request::Form, Data, Route};
use rocket_contrib::json::Json; use rocket_contrib::json::Json;
use serde_json::Value; use serde_json::Value;
use multipart::server::save::SavedData;
use multipart::server::{Multipart, SaveResult};
use data_encoding::HEXLOWER; use data_encoding::HEXLOWER;
use multipart::server::{save::SavedData, Multipart, SaveResult};
use crate::db::models::*; use crate::{
use crate::db::DbConn; api::{self, EmptyResult, JsonResult, JsonUpcase, Notify, PasswordData, UpdateType},
auth::Headers,
use crate::crypto; crypto,
db::{models::*, DbConn},
use crate::api::{self, EmptyResult, JsonResult, JsonUpcase, Notify, PasswordData, UpdateType}; CONFIG,
use crate::auth::Headers; };
use crate::CONFIG;
pub fn routes() -> Vec<Route> { pub fn routes() -> Vec<Route> {
// Note that many routes have an `admin` variant; this seems to be
// because the stored procedure that upstream Bitwarden uses to determine
// whether the user can edit a cipher doesn't take into account whether
// the user is an org owner/admin. The `admin` variant first checks
// whether the user is an owner/admin of the relevant org, and if so,
// allows the operation unconditionally.
//
// bitwarden_rs factors in the org owner/admin status as part of
// determining the write accessibility of a cipher, so most
// admin/non-admin implementations can be shared.
routes![ routes![
sync, sync,
get_ciphers, get_ciphers,
@@ -44,15 +49,24 @@ pub fn routes() -> Vec<Route> {
post_cipher_admin, post_cipher_admin,
post_cipher_share, post_cipher_share,
put_cipher_share, put_cipher_share,
put_cipher_share_seleted, put_cipher_share_selected,
post_cipher, post_cipher,
put_cipher, put_cipher,
delete_cipher_post, delete_cipher_post,
delete_cipher_post_admin, delete_cipher_post_admin,
delete_cipher_put,
delete_cipher_put_admin,
delete_cipher, delete_cipher,
delete_cipher_admin, delete_cipher_admin,
delete_cipher_selected, delete_cipher_selected,
delete_cipher_selected_post, delete_cipher_selected_post,
delete_cipher_selected_put,
delete_cipher_selected_admin,
delete_cipher_selected_post_admin,
delete_cipher_selected_put_admin,
restore_cipher_put,
restore_cipher_put_admin,
restore_cipher_selected,
delete_all, delete_all,
move_cipher_selected, move_cipher_selected,
move_cipher_selected_put, move_cipher_selected_put,
@@ -70,55 +84,64 @@ struct SyncData {
} }
#[get("/sync?<data..>")] #[get("/sync?<data..>")]
fn sync(data: Form<SyncData>, headers: Headers, conn: DbConn) -> JsonResult { fn sync(data: Form<SyncData>, headers: Headers, conn: DbConn) -> Json<Value> {
let user_json = headers.user.to_json(&conn); let user_json = headers.user.to_json(&conn);
let folders = Folder::find_by_user(&headers.user.uuid, &conn); let folders = Folder::find_by_user(&headers.user.uuid, &conn);
let folders_json: Vec<Value> = folders.iter().map(Folder::to_json).collect(); let folders_json: Vec<Value> = folders.iter().map(Folder::to_json).collect();
let collections = Collection::find_by_user_uuid(&headers.user.uuid, &conn); let collections = Collection::find_by_user_uuid(&headers.user.uuid, &conn);
let collections_json: Vec<Value> = collections.iter().map(Collection::to_json).collect(); let collections_json: Vec<Value> = collections.iter()
.map(|c| c.to_json_details(&headers.user.uuid, &conn))
.collect();
let policies = OrgPolicy::find_by_user(&headers.user.uuid, &conn); let policies = OrgPolicy::find_by_user(&headers.user.uuid, &conn);
let policies_json: Vec<Value> = policies.iter().map(OrgPolicy::to_json).collect(); let policies_json: Vec<Value> = policies.iter().map(OrgPolicy::to_json).collect();
let ciphers = Cipher::find_by_user(&headers.user.uuid, &conn); let ciphers = Cipher::find_by_user_visible(&headers.user.uuid, &conn);
let ciphers_json: Vec<Value> = ciphers let ciphers_json: Vec<Value> = ciphers
.iter() .iter()
.map(|c| c.to_json(&headers.host, &headers.user.uuid, &conn)) .map(|c| c.to_json(&headers.host, &headers.user.uuid, &conn))
.collect(); .collect();
let sends = Send::find_by_user(&headers.user.uuid, &conn);
let sends_json: Vec<Value> = sends
.iter()
.map(|s| s.to_json())
.collect();
let domains_json = if data.exclude_domains { let domains_json = if data.exclude_domains {
Value::Null Value::Null
} else { } else {
api::core::_get_eq_domains(headers, true).unwrap().into_inner() api::core::_get_eq_domains(headers, true).into_inner()
}; };
Ok(Json(json!({ Json(json!({
"Profile": user_json, "Profile": user_json,
"Folders": folders_json, "Folders": folders_json,
"Collections": collections_json, "Collections": collections_json,
"Policies": policies_json, "Policies": policies_json,
"Ciphers": ciphers_json, "Ciphers": ciphers_json,
"Domains": domains_json, "Domains": domains_json,
"Sends": sends_json,
"Object": "sync" "Object": "sync"
}))) }))
} }
#[get("/ciphers")] #[get("/ciphers")]
fn get_ciphers(headers: Headers, conn: DbConn) -> JsonResult { fn get_ciphers(headers: Headers, conn: DbConn) -> Json<Value> {
let ciphers = Cipher::find_by_user(&headers.user.uuid, &conn); let ciphers = Cipher::find_by_user_visible(&headers.user.uuid, &conn);
let ciphers_json: Vec<Value> = ciphers let ciphers_json: Vec<Value> = ciphers
.iter() .iter()
.map(|c| c.to_json(&headers.host, &headers.user.uuid, &conn)) .map(|c| c.to_json(&headers.host, &headers.user.uuid, &conn))
.collect(); .collect();
Ok(Json(json!({ Json(json!({
"Data": ciphers_json, "Data": ciphers_json,
"Object": "list", "Object": "list",
"ContinuationToken": null "ContinuationToken": null
}))) }))
} }
#[get("/ciphers/<uuid>")] #[get("/ciphers/<uuid>")]
@@ -181,6 +204,14 @@ pub struct CipherData {
#[serde(rename = "Attachments")] #[serde(rename = "Attachments")]
_Attachments: Option<Value>, // Unused, contains map of {id: filename} _Attachments: Option<Value>, // Unused, contains map of {id: filename}
Attachments2: Option<HashMap<String, Attachments2Data>>, Attachments2: Option<HashMap<String, Attachments2Data>>,
// The revision datetime (in ISO 8601 format) of the client's local copy
// of the cipher. This is used to prevent a client from updating a cipher
// when it doesn't have the latest version, as that can result in data
// loss. It's not an error when no value is provided; this can happen
// when using older client versions, or if the operation doesn't involve
// updating an existing cipher.
LastKnownRevisionDate: Option<String>,
} }
#[derive(Deserialize, Debug)] #[derive(Deserialize, Debug)]
@@ -190,22 +221,46 @@ pub struct Attachments2Data {
Key: String, Key: String,
} }
/// Called when an org admin clones an org cipher.
#[post("/ciphers/admin", data = "<data>")] #[post("/ciphers/admin", data = "<data>")]
fn post_ciphers_admin(data: JsonUpcase<ShareCipherData>, headers: Headers, conn: DbConn, nt: Notify) -> JsonResult { fn post_ciphers_admin(data: JsonUpcase<ShareCipherData>, headers: Headers, conn: DbConn, nt: Notify) -> JsonResult {
let data: ShareCipherData = data.into_inner().data; post_ciphers_create(data, headers, conn, nt)
}
/// Called when creating a new org-owned cipher, or cloning a cipher (whether
/// user- or org-owned). When cloning a cipher to a user-owned cipher,
/// `organizationId` is null.
#[post("/ciphers/create", data = "<data>")]
fn post_ciphers_create(data: JsonUpcase<ShareCipherData>, headers: Headers, conn: DbConn, nt: Notify) -> JsonResult {
let mut data: ShareCipherData = data.into_inner().data;
// Check if one or more collections are selected when this cipher is part of an organization.
// Err if this is not the case, before creating an empty cipher.
if data.Cipher.OrganizationId.is_some() && data.CollectionIds.is_empty() {
err!("You must select at least one collection.");
}
// This check is usually only needed in update_cipher_from_data(), but we
// need it here as well to avoid creating an empty cipher in the call to
// cipher.save() below.
enforce_personal_ownership_policy(&data.Cipher, &headers, &conn)?;
let mut cipher = Cipher::new(data.Cipher.Type, data.Cipher.Name.clone()); let mut cipher = Cipher::new(data.Cipher.Type, data.Cipher.Name.clone());
cipher.user_uuid = Some(headers.user.uuid.clone()); cipher.user_uuid = Some(headers.user.uuid.clone());
cipher.save(&conn)?; cipher.save(&conn)?;
// When cloning a cipher, the Bitwarden clients seem to set this field
// based on the cipher being cloned (when creating a new cipher, it's set
// to null as expected). However, `cipher.created_at` is initialized to
// the current time, so the stale data check will end up failing down the
// line. Since this function only creates new ciphers (whether by cloning
// or otherwise), we can just ignore this field entirely.
data.Cipher.LastKnownRevisionDate = None;
share_cipher_by_uuid(&cipher.uuid, data, &headers, &conn, &nt) share_cipher_by_uuid(&cipher.uuid, data, &headers, &conn, &nt)
} }
#[post("/ciphers/create", data = "<data>")] /// Called when creating a new user-owned cipher.
fn post_ciphers_create(data: JsonUpcase<ShareCipherData>, headers: Headers, conn: DbConn, nt: Notify) -> JsonResult {
post_ciphers_admin(data, headers, conn, nt)
}
#[post("/ciphers", data = "<data>")] #[post("/ciphers", data = "<data>")]
fn post_ciphers(data: JsonUpcase<CipherData>, headers: Headers, conn: DbConn, nt: Notify) -> JsonResult { fn post_ciphers(data: JsonUpcase<CipherData>, headers: Headers, conn: DbConn, nt: Notify) -> JsonResult {
let data: CipherData = data.into_inner().data; let data: CipherData = data.into_inner().data;
@@ -216,6 +271,29 @@ fn post_ciphers(data: JsonUpcase<CipherData>, headers: Headers, conn: DbConn, nt
Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, &conn))) Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, &conn)))
} }
/// Enforces the personal ownership policy on user-owned ciphers, if applicable.
/// A non-owner/admin user belonging to an org with the personal ownership policy
/// enabled isn't allowed to create new user-owned ciphers or modify existing ones
/// (that were created before the policy was applicable to the user). The user is
/// allowed to delete or share such ciphers to an org, however.
///
/// Ref: https://bitwarden.com/help/article/policies/#personal-ownership
fn enforce_personal_ownership_policy(
data: &CipherData,
headers: &Headers,
conn: &DbConn
) -> EmptyResult {
if data.OrganizationId.is_none() {
let user_uuid = &headers.user.uuid;
let policy_type = OrgPolicyType::PersonalOwnership;
if OrgPolicy::is_applicable_to_user(user_uuid, policy_type, conn) {
err!("Due to an Enterprise Policy, you are restricted from \
saving items to your personal vault.")
}
}
Ok(())
}
pub fn update_cipher_from_data( pub fn update_cipher_from_data(
cipher: &mut Cipher, cipher: &mut Cipher,
data: CipherData, data: CipherData,
@@ -225,6 +303,19 @@ pub fn update_cipher_from_data(
nt: &Notify, nt: &Notify,
ut: UpdateType, ut: UpdateType,
) -> EmptyResult { ) -> EmptyResult {
enforce_personal_ownership_policy(&data, headers, conn)?;
// Check that the client isn't updating an existing cipher with stale data.
if let Some(dt) = data.LastKnownRevisionDate {
match NaiveDateTime::parse_from_str(&dt, "%+") { // ISO 8601 format
Err(err) =>
warn!("Error parsing LastKnownRevisionDate '{}': {}", dt, err),
Ok(dt) if cipher.updated_at.signed_duration_since(dt).num_seconds() > 1 =>
err!("The client copy of this cipher is out of date. Resync the client and try again."),
Ok(_) => (),
}
}
if cipher.organization_uuid.is_some() && cipher.organization_uuid != data.OrganizationId { if cipher.organization_uuid.is_some() && cipher.organization_uuid != data.OrganizationId {
err!("Organization mismatch. Please resync the client before updating the cipher") err!("Organization mismatch. Please resync the client before updating the cipher")
} }
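A self-contained restatement of the stale-data comparison above, using the same "%+" (ISO 8601) format string and the same one-second tolerance; chrono is already a dependency.

use chrono::NaiveDateTime;

fn client_copy_is_stale(last_known_revision: &str, server_updated_at: NaiveDateTime) -> bool {
    match NaiveDateTime::parse_from_str(last_known_revision, "%+") {
        // The handler only warns on parse errors, so an unparseable date is
        // not treated as stale here either.
        Err(_) => false,
        // More than one second older than the server copy counts as stale.
        Ok(dt) => server_updated_at.signed_duration_since(dt).num_seconds() > 1,
    }
}

fn main() {
    let server = NaiveDateTime::parse_from_str("2021-03-27T13:30:40+00:00", "%+").unwrap();
    assert!(client_copy_is_stale("2021-03-27T13:30:00+00:00", server));
}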
@@ -238,6 +329,11 @@ pub fn update_cipher_from_data(
|| cipher.is_write_accessible_to_user(&headers.user.uuid, &conn) || cipher.is_write_accessible_to_user(&headers.user.uuid, &conn)
{ {
cipher.organization_uuid = Some(org_id); cipher.organization_uuid = Some(org_id);
// After some discussion in PR #1329, the user_uuid = None line was re-added.
// TODO: Audit/check the whole save/update cipher chain.
// Upstream keeps the user_uuid so that a cipher a user added to an org remains viewable/editable by that user,
// even when the hide-passwords policy is configured for them.
// Removing the line below would fix that, but we have to check what effect it would have on the rest of the code.
cipher.user_uuid = None; cipher.user_uuid = None;
} else { } else {
err!("You don't have permission to add cipher directly to organization") err!("You don't have permission to add cipher directly to organization")
@@ -268,7 +364,10 @@ pub fn update_cipher_from_data(
}; };
if saved_att.cipher_uuid != cipher.uuid { if saved_att.cipher_uuid != cipher.uuid {
err!("Attachment is not owned by the cipher") // Warn and break here since cloning ciphers provides attachment data but will not be cloned.
// If we error out here it will break the whole cloning and causes empty ciphers to appear.
warn!("Attachment is not owned by the cipher");
break;
} }
saved_att.akey = Some(attachment.Key); saved_att.akey = Some(attachment.Key);
@@ -278,6 +377,23 @@ pub fn update_cipher_from_data(
} }
} }
// Clean up cipher data, like removing the 'Response' key.
// This key is generated somewhere in the Javascript client, so there is no way for us to fix this at the source.
// Also, upstream only retrieves the keys they actually want to store, and thus skips the 'Response' key.
// We do not mind which data is in it; that keeps our model more flexible when there are upstream changes.
// But we at least know we do not need to store and return this specific key.
fn _clean_cipher_data(mut json_data: Value) -> Value {
if json_data.is_array() {
json_data.as_array_mut()
.unwrap()
.iter_mut()
.for_each(|ref mut f| {
f.as_object_mut().unwrap().remove("Response");
});
};
json_data
}
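An input/output illustration of the cleanup above. The helper body is copied here (in a slightly more defensive, unwrap-free form) so the snippet stands alone, and the URI values are made up:

use serde_json::{json, Value};

fn clean_example(mut json_data: Value) -> Value {
    if let Some(arr) = json_data.as_array_mut() {
        for f in arr.iter_mut() {
            if let Some(obj) = f.as_object_mut() {
                obj.remove("Response");
            }
        }
    }
    json_data
}

fn main() {
    let uris = json!([
        { "Uri": "https://one.example", "Response": "added by the JS client" },
        { "Uri": "https://two.example" }
    ]);
    assert_eq!(
        clean_example(uris),
        json!([{ "Uri": "https://one.example" }, { "Uri": "https://two.example" }])
    );
}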
let type_data_opt = match data.Type { let type_data_opt = match data.Type {
1 => data.Login, 1 => data.Login,
2 => data.SecureNote, 2 => data.SecureNote,
@@ -286,29 +402,28 @@ pub fn update_cipher_from_data(
_ => err!("Invalid type"), _ => err!("Invalid type"),
}; };
let mut type_data = match type_data_opt { let type_data = match type_data_opt {
Some(data) => data, Some(mut data) => {
// Remove the 'Response' key from the base object.
data.as_object_mut().unwrap().remove("Response");
// Remove the 'Response' key from every Uri.
if data["Uris"].is_array() {
data["Uris"] = _clean_cipher_data(data["Uris"].clone());
}
data
},
None => err!("Data missing"), None => err!("Data missing"),
}; };
// TODO: ******* Backwards compat start **********
// To remove backwards compatibility, just delete this code,
// and remove the compat code from cipher::to_json
type_data["Name"] = Value::String(data.Name.clone());
type_data["Notes"] = data.Notes.clone().map(Value::String).unwrap_or(Value::Null);
type_data["Fields"] = data.Fields.clone().unwrap_or(Value::Null);
type_data["PasswordHistory"] = data.PasswordHistory.clone().unwrap_or(Value::Null);
// TODO: ******* Backwards compat end **********
cipher.favorite = data.Favorite.unwrap_or(false);
cipher.name = data.Name; cipher.name = data.Name;
cipher.notes = data.Notes; cipher.notes = data.Notes;
cipher.fields = data.Fields.map(|f| f.to_string()); cipher.fields = data.Fields.map(|f| _clean_cipher_data(f).to_string() );
cipher.data = type_data.to_string(); cipher.data = type_data.to_string();
cipher.password_history = data.PasswordHistory.map(|f| f.to_string()); cipher.password_history = data.PasswordHistory.map(|f| f.to_string());
cipher.save(&conn)?; cipher.save(&conn)?;
cipher.move_to_folder(data.FolderId, &headers.user.uuid, &conn)?; cipher.move_to_folder(data.FolderId, &headers.user.uuid, &conn)?;
cipher.set_favorite(data.Favorite, &headers.user.uuid, &conn)?;
if ut != UpdateType::None { if ut != UpdateType::None {
nt.send_cipher_update(ut, &cipher, &cipher.update_users_revision(&conn)); nt.send_cipher_update(ut, &cipher, &cipher.update_users_revision(&conn));
@@ -371,6 +486,7 @@ fn post_ciphers_import(data: JsonUpcase<ImportData>, headers: Headers, conn: DbC
Ok(()) Ok(())
} }
/// Called when an org admin modifies an existing org cipher.
#[put("/ciphers/<uuid>/admin", data = "<data>")] #[put("/ciphers/<uuid>/admin", data = "<data>")]
fn put_cipher_admin( fn put_cipher_admin(
uuid: String, uuid: String,
@@ -407,6 +523,11 @@ fn put_cipher(uuid: String, data: JsonUpcase<CipherData>, headers: Headers, conn
None => err!("Cipher doesn't exist"), None => err!("Cipher doesn't exist"),
}; };
// TODO: Check if only the folder ID or favorite status is being changed.
// These are per-user properties that technically aren't part of the
// cipher itself, so the user shouldn't need write access to change these.
// Interestingly, upstream Bitwarden doesn't properly handle this either.
if !cipher.is_write_accessible_to_user(&headers.user.uuid, &conn) { if !cipher.is_write_accessible_to_user(&headers.user.uuid, &conn) {
err!("Cipher is not write accessible") err!("Cipher is not write accessible")
} }
@@ -540,7 +661,7 @@ struct ShareSelectedCipherData {
} }
#[put("/ciphers/share", data = "<data>")] #[put("/ciphers/share", data = "<data>")]
fn put_cipher_share_seleted( fn put_cipher_share_selected(
data: JsonUpcase<ShareSelectedCipherData>, data: JsonUpcase<ShareSelectedCipherData>,
headers: Headers, headers: Headers,
conn: DbConn, conn: DbConn,
@@ -608,9 +729,8 @@ fn share_cipher_by_uuid(
match data.Cipher.OrganizationId.clone() { match data.Cipher.OrganizationId.clone() {
// If we don't get an organization ID, we don't do anything // If we don't get an organization ID, we don't do anything
// No error because this is used when using the Clone functionality // No error because this is used when using the Clone functionality
None => {}, None => {}
Some(organization_uuid) => { Some(organization_uuid) => {
for uuid in &data.CollectionIds { for uuid in &data.CollectionIds {
match Collection::find_by_uuid_and_org(uuid, &organization_uuid, &conn) { match Collection::find_by_uuid_and_org(uuid, &organization_uuid, &conn) {
None => err!("Invalid collection ID provided"), None => err!("Invalid collection ID provided"),
@@ -665,8 +785,8 @@ fn post_attachment(
let size_limit = if let Some(ref user_uuid) = cipher.user_uuid { let size_limit = if let Some(ref user_uuid) = cipher.user_uuid {
match CONFIG.user_attachment_limit() { match CONFIG.user_attachment_limit() {
Some(0) => err_discard!("Attachments are disabled", data), Some(0) => err_discard!("Attachments are disabled", data),
Some(limit) => { Some(limit_kb) => {
let left = limit - Attachment::size_by_user(user_uuid, &conn); let left = (limit_kb * 1024) - Attachment::size_by_user(user_uuid, &conn);
if left <= 0 { if left <= 0 {
err_discard!("Attachment size limit reached! Delete some files to open space", data) err_discard!("Attachment size limit reached! Delete some files to open space", data)
} }
@@ -677,8 +797,8 @@ fn post_attachment(
} else if let Some(ref org_uuid) = cipher.organization_uuid { } else if let Some(ref org_uuid) = cipher.organization_uuid {
match CONFIG.org_attachment_limit() { match CONFIG.org_attachment_limit() {
Some(0) => err_discard!("Attachments are disabled", data), Some(0) => err_discard!("Attachments are disabled", data),
Some(limit) => { Some(limit_kb) => {
let left = limit - Attachment::size_by_org(org_uuid, &conn); let left = (limit_kb * 1024) - Attachment::size_by_org(org_uuid, &conn);
if left <= 0 { if left <= 0 {
err_discard!("Attachment size limit reached! Delete some files to open space", data) err_discard!("Attachment size limit reached! Delete some files to open space", data)
} }
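The attachment limit configs are now interpreted as kilobytes, so they are multiplied by 1024 before subtracting the space already used. A tiny self-contained sketch of that arithmetic (sizes as i64 in this sketch):

fn remaining_attachment_space(limit_kb: i64, used_bytes: i64) -> i64 {
    (limit_kb * 1024) - used_bytes
}

fn main() {
    // A 100 KB limit with 50 000 bytes already used leaves 52 400 bytes;
    // a result <= 0 triggers the "limit reached" error above.
    assert_eq!(remaining_attachment_space(100, 50_000), 52_400);
}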
@@ -761,11 +881,7 @@ fn post_attachment_admin(
post_attachment(uuid, data, content_type, headers, conn, nt) post_attachment(uuid, data, content_type, headers, conn, nt)
} }
#[post( #[post("/ciphers/<uuid>/attachment/<attachment_id>/share", format = "multipart/form-data", data = "<data>")]
"/ciphers/<uuid>/attachment/<attachment_id>/share",
format = "multipart/form-data",
data = "<data>"
)]
fn post_attachment_share( fn post_attachment_share(
uuid: String, uuid: String,
attachment_id: String, attachment_id: String,
@@ -819,50 +935,79 @@ fn delete_attachment_admin(
#[post("/ciphers/<uuid>/delete")] #[post("/ciphers/<uuid>/delete")]
fn delete_cipher_post(uuid: String, headers: Headers, conn: DbConn, nt: Notify) -> EmptyResult { fn delete_cipher_post(uuid: String, headers: Headers, conn: DbConn, nt: Notify) -> EmptyResult {
_delete_cipher_by_uuid(&uuid, &headers, &conn, &nt) _delete_cipher_by_uuid(&uuid, &headers, &conn, false, &nt)
} }
#[post("/ciphers/<uuid>/delete-admin")] #[post("/ciphers/<uuid>/delete-admin")]
fn delete_cipher_post_admin(uuid: String, headers: Headers, conn: DbConn, nt: Notify) -> EmptyResult { fn delete_cipher_post_admin(uuid: String, headers: Headers, conn: DbConn, nt: Notify) -> EmptyResult {
_delete_cipher_by_uuid(&uuid, &headers, &conn, &nt) _delete_cipher_by_uuid(&uuid, &headers, &conn, false, &nt)
}
#[put("/ciphers/<uuid>/delete")]
fn delete_cipher_put(uuid: String, headers: Headers, conn: DbConn, nt: Notify) -> EmptyResult {
_delete_cipher_by_uuid(&uuid, &headers, &conn, true, &nt)
}
#[put("/ciphers/<uuid>/delete-admin")]
fn delete_cipher_put_admin(uuid: String, headers: Headers, conn: DbConn, nt: Notify) -> EmptyResult {
_delete_cipher_by_uuid(&uuid, &headers, &conn, true, &nt)
} }
#[delete("/ciphers/<uuid>")] #[delete("/ciphers/<uuid>")]
fn delete_cipher(uuid: String, headers: Headers, conn: DbConn, nt: Notify) -> EmptyResult { fn delete_cipher(uuid: String, headers: Headers, conn: DbConn, nt: Notify) -> EmptyResult {
_delete_cipher_by_uuid(&uuid, &headers, &conn, &nt) _delete_cipher_by_uuid(&uuid, &headers, &conn, false, &nt)
} }
#[delete("/ciphers/<uuid>/admin")] #[delete("/ciphers/<uuid>/admin")]
fn delete_cipher_admin(uuid: String, headers: Headers, conn: DbConn, nt: Notify) -> EmptyResult { fn delete_cipher_admin(uuid: String, headers: Headers, conn: DbConn, nt: Notify) -> EmptyResult {
_delete_cipher_by_uuid(&uuid, &headers, &conn, &nt) _delete_cipher_by_uuid(&uuid, &headers, &conn, false, &nt)
} }
#[delete("/ciphers", data = "<data>")] #[delete("/ciphers", data = "<data>")]
fn delete_cipher_selected(data: JsonUpcase<Value>, headers: Headers, conn: DbConn, nt: Notify) -> EmptyResult { fn delete_cipher_selected(data: JsonUpcase<Value>, headers: Headers, conn: DbConn, nt: Notify) -> EmptyResult {
let data: Value = data.into_inner().data; _delete_multiple_ciphers(data, headers, conn, false, nt)
let uuids = match data.get("Ids") {
Some(ids) => match ids.as_array() {
Some(ids) => ids.iter().filter_map(Value::as_str),
None => err!("Posted ids field is not an array"),
},
None => err!("Request missing ids field"),
};
for uuid in uuids {
if let error @ Err(_) = _delete_cipher_by_uuid(uuid, &headers, &conn, &nt) {
return error;
};
}
Ok(())
} }
#[post("/ciphers/delete", data = "<data>")] #[post("/ciphers/delete", data = "<data>")]
fn delete_cipher_selected_post(data: JsonUpcase<Value>, headers: Headers, conn: DbConn, nt: Notify) -> EmptyResult { fn delete_cipher_selected_post(data: JsonUpcase<Value>, headers: Headers, conn: DbConn, nt: Notify) -> EmptyResult {
_delete_multiple_ciphers(data, headers, conn, false, nt)
}
#[put("/ciphers/delete", data = "<data>")]
fn delete_cipher_selected_put(data: JsonUpcase<Value>, headers: Headers, conn: DbConn, nt: Notify) -> EmptyResult {
_delete_multiple_ciphers(data, headers, conn, true, nt) // soft delete
}
#[delete("/ciphers/admin", data = "<data>")]
fn delete_cipher_selected_admin(data: JsonUpcase<Value>, headers: Headers, conn: DbConn, nt: Notify) -> EmptyResult {
delete_cipher_selected(data, headers, conn, nt) delete_cipher_selected(data, headers, conn, nt)
} }
#[post("/ciphers/delete-admin", data = "<data>")]
fn delete_cipher_selected_post_admin(data: JsonUpcase<Value>, headers: Headers, conn: DbConn, nt: Notify) -> EmptyResult {
delete_cipher_selected_post(data, headers, conn, nt)
}
#[put("/ciphers/delete-admin", data = "<data>")]
fn delete_cipher_selected_put_admin(data: JsonUpcase<Value>, headers: Headers, conn: DbConn, nt: Notify) -> EmptyResult {
delete_cipher_selected_put(data, headers, conn, nt)
}
#[put("/ciphers/<uuid>/restore")]
fn restore_cipher_put(uuid: String, headers: Headers, conn: DbConn, nt: Notify) -> JsonResult {
_restore_cipher_by_uuid(&uuid, &headers, &conn, &nt)
}
#[put("/ciphers/<uuid>/restore-admin")]
fn restore_cipher_put_admin(uuid: String, headers: Headers, conn: DbConn, nt: Notify) -> JsonResult {
_restore_cipher_by_uuid(&uuid, &headers, &conn, &nt)
}
#[put("/ciphers/restore", data = "<data>")]
fn restore_cipher_selected(data: JsonUpcase<Value>, headers: Headers, conn: DbConn, nt: Notify) -> JsonResult {
_restore_multiple_ciphers(data, &headers, &conn, &nt)
}
#[derive(Deserialize)] #[derive(Deserialize)]
#[allow(non_snake_case)] #[allow(non_snake_case)]
struct MoveCipherData { struct MoveCipherData {
@@ -946,7 +1091,6 @@ fn delete_all(
Some(user_org) => { Some(user_org) => {
if user_org.atype == UserOrgType::Owner { if user_org.atype == UserOrgType::Owner {
Cipher::delete_all_by_organization(&org_data.org_id, &conn)?; Cipher::delete_all_by_organization(&org_data.org_id, &conn)?;
Collection::delete_all_by_organization(&org_data.org_id, &conn)?;
nt.send_user_update(UpdateType::Vault, &user); nt.send_user_update(UpdateType::Vault, &user);
Ok(()) Ok(())
} else { } else {
@@ -974,8 +1118,8 @@ fn delete_all(
} }
} }
fn _delete_cipher_by_uuid(uuid: &str, headers: &Headers, conn: &DbConn, nt: &Notify) -> EmptyResult { fn _delete_cipher_by_uuid(uuid: &str, headers: &Headers, conn: &DbConn, soft_delete: bool, nt: &Notify) -> EmptyResult {
let cipher = match Cipher::find_by_uuid(&uuid, &conn) { let mut cipher = match Cipher::find_by_uuid(&uuid, &conn) {
Some(cipher) => cipher, Some(cipher) => cipher,
None => err!("Cipher doesn't exist"), None => err!("Cipher doesn't exist"),
}; };
@@ -984,11 +1128,81 @@ fn _delete_cipher_by_uuid(uuid: &str, headers: &Headers, conn: &DbConn, nt: &Not
err!("Cipher can't be deleted by user") err!("Cipher can't be deleted by user")
} }
cipher.delete(&conn)?; if soft_delete {
nt.send_cipher_update(UpdateType::CipherDelete, &cipher, &cipher.update_users_revision(&conn)); cipher.deleted_at = Some(Utc::now().naive_utc());
cipher.save(&conn)?;
nt.send_cipher_update(UpdateType::CipherUpdate, &cipher, &cipher.update_users_revision(&conn));
} else {
cipher.delete(&conn)?;
nt.send_cipher_update(UpdateType::CipherDelete, &cipher, &cipher.update_users_revision(&conn));
}
Ok(()) Ok(())
} }
fn _delete_multiple_ciphers(data: JsonUpcase<Value>, headers: Headers, conn: DbConn, soft_delete: bool, nt: Notify) -> EmptyResult {
let data: Value = data.into_inner().data;
let uuids = match data.get("Ids") {
Some(ids) => match ids.as_array() {
Some(ids) => ids.iter().filter_map(Value::as_str),
None => err!("Posted ids field is not an array"),
},
None => err!("Request missing ids field"),
};
for uuid in uuids {
if let error @ Err(_) = _delete_cipher_by_uuid(uuid, &headers, &conn, soft_delete, &nt) {
return error;
};
}
Ok(())
}
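The bulk delete/restore handlers all pull the uuid list out of an "Ids" array in the request body. A self-contained illustration of that extraction (the uuids are made up; the handlers above err! on a missing or non-array field instead of defaulting):

use serde_json::{json, Value};

fn main() {
    let data: Value = json!({ "Ids": ["uuid-1", "uuid-2", 3] });
    let uuids: Vec<&str> = data["Ids"]
        .as_array()
        .map(|ids| ids.iter().filter_map(Value::as_str).collect())
        .unwrap_or_default();
    // filter_map(Value::as_str) silently skips non-string entries.
    assert_eq!(uuids, vec!["uuid-1", "uuid-2"]);
}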
fn _restore_cipher_by_uuid(uuid: &str, headers: &Headers, conn: &DbConn, nt: &Notify) -> JsonResult {
let mut cipher = match Cipher::find_by_uuid(&uuid, &conn) {
Some(cipher) => cipher,
None => err!("Cipher doesn't exist"),
};
if !cipher.is_write_accessible_to_user(&headers.user.uuid, &conn) {
err!("Cipher can't be restored by user")
}
cipher.deleted_at = None;
cipher.save(&conn)?;
nt.send_cipher_update(UpdateType::CipherUpdate, &cipher, &cipher.update_users_revision(&conn));
Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, &conn)))
}
fn _restore_multiple_ciphers(data: JsonUpcase<Value>, headers: &Headers, conn: &DbConn, nt: &Notify) -> JsonResult {
let data: Value = data.into_inner().data;
let uuids = match data.get("Ids") {
Some(ids) => match ids.as_array() {
Some(ids) => ids.iter().filter_map(Value::as_str),
None => err!("Posted ids field is not an array"),
},
None => err!("Request missing ids field"),
};
let mut ciphers: Vec<Value> = Vec::new();
for uuid in uuids {
match _restore_cipher_by_uuid(uuid, headers, conn, nt) {
Ok(json) => ciphers.push(json.into_inner()),
err => return err
}
}
Ok(Json(json!({
"Data": ciphers,
"Object": "list",
"ContinuationToken": null
})))
}
fn _delete_cipher_attachment_by_id( fn _delete_cipher_attachment_by_id(
uuid: &str, uuid: &str,
attachment_id: &str, attachment_id: &str,

View File

@@ -1,15 +1,13 @@
use rocket_contrib::json::Json; use rocket_contrib::json::Json;
use serde_json::Value; use serde_json::Value;
use crate::db::models::*; use crate::{
use crate::db::DbConn; api::{EmptyResult, JsonResult, JsonUpcase, Notify, UpdateType},
auth::Headers,
db::{models::*, DbConn},
};
use crate::api::{EmptyResult, JsonResult, JsonUpcase, Notify, UpdateType}; pub fn routes() -> Vec<rocket::Route> {
use crate::auth::Headers;
use rocket::Route;
pub fn routes() -> Vec<Route> {
routes![ routes![
get_folders, get_folders,
get_folder, get_folder,
@@ -22,16 +20,16 @@ pub fn routes() -> Vec<Route> {
} }
#[get("/folders")] #[get("/folders")]
fn get_folders(headers: Headers, conn: DbConn) -> JsonResult { fn get_folders(headers: Headers, conn: DbConn) -> Json<Value> {
let folders = Folder::find_by_user(&headers.user.uuid, &conn); let folders = Folder::find_by_user(&headers.user.uuid, &conn);
let folders_json: Vec<Value> = folders.iter().map(Folder::to_json).collect(); let folders_json: Vec<Value> = folders.iter().map(Folder::to_json).collect();
Ok(Json(json!({ Json(json!({
"Data": folders_json, "Data": folders_json,
"Object": "list", "Object": "list",
"ContinuationToken": null, "ContinuationToken": null,
}))) }))
} }
#[get("/folders/<uuid>")] #[get("/folders/<uuid>")]
@@ -50,7 +48,6 @@ fn get_folder(uuid: String, headers: Headers, conn: DbConn) -> JsonResult {
#[derive(Deserialize)] #[derive(Deserialize)]
#[allow(non_snake_case)] #[allow(non_snake_case)]
pub struct FolderData { pub struct FolderData {
pub Name: String, pub Name: String,
} }

View File

@@ -2,7 +2,10 @@ mod accounts;
mod ciphers; mod ciphers;
mod folders; mod folders;
mod organizations; mod organizations;
pub(crate) mod two_factor; pub mod two_factor;
mod sends;
pub use sends::start_send_deletion_scheduler;
pub fn routes() -> Vec<Route> { pub fn routes() -> Vec<Route> {
let mut mod_routes = routes![ let mut mod_routes = routes![
@@ -20,6 +23,7 @@ pub fn routes() -> Vec<Route> {
routes.append(&mut folders::routes()); routes.append(&mut folders::routes());
routes.append(&mut organizations::routes()); routes.append(&mut organizations::routes());
routes.append(&mut two_factor::routes()); routes.append(&mut two_factor::routes());
routes.append(&mut sends::routes());
routes.append(&mut mod_routes); routes.append(&mut mod_routes);
routes routes
@@ -29,17 +33,19 @@ pub fn routes() -> Vec<Route> {
// Move this somewhere else // Move this somewhere else
// //
use rocket::Route; use rocket::Route;
use rocket_contrib::json::Json; use rocket_contrib::json::Json;
use rocket::response::Response;
use serde_json::Value; use serde_json::Value;
use crate::api::{EmptyResult, JsonResult, JsonUpcase}; use crate::{
use crate::auth::Headers; api::{JsonResult, JsonUpcase},
use crate::db::DbConn; auth::Headers,
use crate::error::Error; db::DbConn,
error::Error,
};
#[put("/devices/identifier/<uuid>/clear-token")] #[put("/devices/identifier/<uuid>/clear-token")]
fn clear_device_token(uuid: String) -> EmptyResult { fn clear_device_token<'a>(uuid: String) -> Response<'a> {
// This endpoint doesn't have auth header // This endpoint doesn't have auth header
let _ = uuid; let _ = uuid;
@@ -48,11 +54,11 @@ fn clear_device_token(uuid: String) -> EmptyResult {
// This only clears push token // This only clears push token
// https://github.com/bitwarden/core/blob/master/src/Api/Controllers/DevicesController.cs#L109 // https://github.com/bitwarden/core/blob/master/src/Api/Controllers/DevicesController.cs#L109
// https://github.com/bitwarden/core/blob/master/src/Core/Services/Implementations/DeviceService.cs#L37 // https://github.com/bitwarden/core/blob/master/src/Core/Services/Implementations/DeviceService.cs#L37
Ok(()) Response::new()
} }
#[put("/devices/identifier/<uuid>/token", data = "<data>")] #[put("/devices/identifier/<uuid>/token", data = "<data>")]
fn put_device_token(uuid: String, data: JsonUpcase<Value>, headers: Headers) -> JsonResult { fn put_device_token(uuid: String, data: JsonUpcase<Value>, headers: Headers) -> Json<Value> {
let _data: Value = data.into_inner().data; let _data: Value = data.into_inner().data;
// Data has a single string value "PushToken" // Data has a single string value "PushToken"
let _ = uuid; let _ = uuid;
@@ -60,13 +66,13 @@ fn put_device_token(uuid: String, data: JsonUpcase<Value>, headers: Headers) ->
// TODO: This should save the push token, but we don't have push functionality // TODO: This should save the push token, but we don't have push functionality
Ok(Json(json!({ Json(json!({
"Id": headers.device.uuid, "Id": headers.device.uuid,
"Name": headers.device.name, "Name": headers.device.name,
"Type": headers.device.atype, "Type": headers.device.atype,
"Identifier": headers.device.uuid, "Identifier": headers.device.uuid,
"CreationDate": crate::util::format_date(&headers.device.created_at), "CreationDate": crate::util::format_date(&headers.device.created_at),
}))) }))
} }
#[derive(Serialize, Deserialize, Debug)] #[derive(Serialize, Deserialize, Debug)]
@@ -80,11 +86,11 @@ struct GlobalDomain {
const GLOBAL_DOMAINS: &str = include_str!("../../static/global_domains.json"); const GLOBAL_DOMAINS: &str = include_str!("../../static/global_domains.json");
#[get("/settings/domains")] #[get("/settings/domains")]
fn get_eq_domains(headers: Headers) -> JsonResult { fn get_eq_domains(headers: Headers) -> Json<Value> {
_get_eq_domains(headers, false) _get_eq_domains(headers, false)
} }
fn _get_eq_domains(headers: Headers, no_excluded: bool) -> JsonResult { fn _get_eq_domains(headers: Headers, no_excluded: bool) -> Json<Value> {
let user = headers.user; let user = headers.user;
use serde_json::from_str; use serde_json::from_str;
@@ -101,11 +107,11 @@ fn _get_eq_domains(headers: Headers, no_excluded: bool) -> JsonResult {
globals.retain(|g| !g.Excluded); globals.retain(|g| !g.Excluded);
} }
Ok(Json(json!({ Json(json!({
"EquivalentDomains": equivalent_domains, "EquivalentDomains": equivalent_domains,
"GlobalEquivalentDomains": globals, "GlobalEquivalentDomains": globals,
"Object": "domains", "Object": "domains",
}))) }))
} }
#[derive(Deserialize, Debug)] #[derive(Deserialize, Debug)]
@@ -146,7 +152,7 @@ fn hibp_breach(username: String) -> JsonResult {
username username
); );
use reqwest::{header::USER_AGENT, blocking::Client}; use reqwest::{blocking::Client, header::USER_AGENT};
if let Some(api_key) = crate::CONFIG.hibp_api_key() { if let Some(api_key) = crate::CONFIG.hibp_api_key() {
let hibp_client = Client::builder().build()?; let hibp_client = Client::builder().build()?;
@@ -171,7 +177,7 @@ fn hibp_breach(username: String) -> JsonResult {
"Domain": "haveibeenpwned.com", "Domain": "haveibeenpwned.com",
"BreachDate": "2019-08-18T00:00:00Z", "BreachDate": "2019-08-18T00:00:00Z",
"AddedDate": "2019-08-18T00:00:00Z", "AddedDate": "2019-08-18T00:00:00Z",
"Description": format!("Go to: <a href=\"https://haveibeenpwned.com/account/{account}\" target=\"_blank\" rel=\"noopener\">https://haveibeenpwned.com/account/{account}</a> for a manual check.<br/><br/>HaveIBeenPwned API key not set!<br/>Go to <a href=\"https://haveibeenpwned.com/API/Key\" target=\"_blank\" rel=\"noopener\">https://haveibeenpwned.com/API/Key</a> to purchase an API key from HaveIBeenPwned.<br/><br/>", account=username), "Description": format!("Go to: <a href=\"https://haveibeenpwned.com/account/{account}\" target=\"_blank\" rel=\"noreferrer\">https://haveibeenpwned.com/account/{account}</a> for a manual check.<br/><br/>HaveIBeenPwned API key not set!<br/>Go to <a href=\"https://haveibeenpwned.com/API/Key\" target=\"_blank\" rel=\"noreferrer\">https://haveibeenpwned.com/API/Key</a> to purchase an API key from HaveIBeenPwned.<br/><br/>", account=username),
"LogoPath": "bwrs_static/hibp.png", "LogoPath": "bwrs_static/hibp.png",
"PwnCount": 0, "PwnCount": 0,
"DataClasses": [ "DataClasses": [

View File

@@ -1,17 +1,14 @@
use rocket::request::Form; use num_traits::FromPrimitive;
use rocket::Route; use rocket::{request::Form, Route};
use rocket_contrib::json::Json; use rocket_contrib::json::Json;
use serde_json::Value; use serde_json::Value;
use num_traits::FromPrimitive;
use crate::api::{ use crate::{
EmptyResult, JsonResult, JsonUpcase, JsonUpcaseVec, Notify, NumberOrString, PasswordData, UpdateType, api::{EmptyResult, JsonResult, JsonUpcase, JsonUpcaseVec, Notify, NumberOrString, PasswordData, UpdateType},
auth::{decode_invite, AdminHeaders, Headers, OwnerHeaders, ManagerHeaders, ManagerHeadersLoose},
db::{models::*, DbConn},
mail, CONFIG,
}; };
use crate::auth::{decode_invite, AdminHeaders, Headers, OwnerHeaders};
use crate::db::models::*;
use crate::db::DbConn;
use crate::mail;
use crate::CONFIG;
pub fn routes() -> Vec<Route> { pub fn routes() -> Vec<Route> {
routes![ routes![
@@ -50,6 +47,10 @@ pub fn routes() -> Vec<Route> {
list_policies_token, list_policies_token,
get_policy, get_policy,
put_policy, put_policy,
get_organization_tax,
get_plans,
get_plans_tax_rates,
import,
] ]
} }
@@ -79,6 +80,10 @@ struct NewCollectionData {
#[post("/organizations", data = "<data>")] #[post("/organizations", data = "<data>")]
fn create_organization(headers: Headers, data: JsonUpcase<OrgData>, conn: DbConn) -> JsonResult { fn create_organization(headers: Headers, data: JsonUpcase<OrgData>, conn: DbConn) -> JsonResult {
if !CONFIG.is_org_creation_allowed(&headers.user.email) {
err!("User not allowed to create organizations")
}
let data: OrgData = data.into_inner().data; let data: OrgData = data.into_inner().data;
let org = Organization::new(data.Name, data.BillingEmail); let org = Organization::new(data.Name, data.BillingEmail);
@@ -187,8 +192,8 @@ fn post_organization(
// GET /api/collections?writeOnly=false // GET /api/collections?writeOnly=false
#[get("/collections")] #[get("/collections")]
fn get_user_collections(headers: Headers, conn: DbConn) -> JsonResult { fn get_user_collections(headers: Headers, conn: DbConn) -> Json<Value> {
Ok(Json(json!({ Json(json!({
"Data": "Data":
Collection::find_by_user_uuid(&headers.user.uuid, &conn) Collection::find_by_user_uuid(&headers.user.uuid, &conn)
.iter() .iter()
@@ -196,12 +201,12 @@ fn get_user_collections(headers: Headers, conn: DbConn) -> JsonResult {
.collect::<Value>(), .collect::<Value>(),
"Object": "list", "Object": "list",
"ContinuationToken": null, "ContinuationToken": null,
}))) }))
} }
#[get("/organizations/<org_id>/collections")] #[get("/organizations/<org_id>/collections")]
fn get_org_collections(org_id: String, _headers: AdminHeaders, conn: DbConn) -> JsonResult { fn get_org_collections(org_id: String, _headers: AdminHeaders, conn: DbConn) -> Json<Value> {
Ok(Json(json!({ Json(json!({
"Data": "Data":
Collection::find_by_organization(&org_id, &conn) Collection::find_by_organization(&org_id, &conn)
.iter() .iter()
@@ -209,13 +214,13 @@ fn get_org_collections(org_id: String, _headers: AdminHeaders, conn: DbConn) ->
.collect::<Value>(), .collect::<Value>(),
"Object": "list", "Object": "list",
"ContinuationToken": null, "ContinuationToken": null,
}))) }))
} }
#[post("/organizations/<org_id>/collections", data = "<data>")] #[post("/organizations/<org_id>/collections", data = "<data>")]
fn post_organization_collections( fn post_organization_collections(
org_id: String, org_id: String,
_headers: AdminHeaders, headers: ManagerHeadersLoose,
data: JsonUpcase<NewCollectionData>, data: JsonUpcase<NewCollectionData>,
conn: DbConn, conn: DbConn,
) -> JsonResult { ) -> JsonResult {
@@ -226,9 +231,22 @@ fn post_organization_collections(
None => err!("Can't find organization details"), None => err!("Can't find organization details"),
}; };
// Get the user_organization record so that we can check if the user has access to all collections.
let user_org = match UserOrganization::find_by_user_and_org(&headers.user.uuid, &org_id, &conn) {
Some(u) => u,
None => err!("User is not part of organization"),
};
let collection = Collection::new(org.uuid, data.Name); let collection = Collection::new(org.uuid, data.Name);
collection.save(&conn)?; collection.save(&conn)?;
// If the user doesn't have access to all collections (which is only the case for a Manager),
// then we need to save the creating user's uuid (the Manager) to the users_collection table.
// Otherwise the user would not have access to their own created collection.
if !user_org.access_all {
CollectionUser::save(&headers.user.uuid, &collection.uuid, false, false, &conn)?;
}
Ok(Json(collection.to_json())) Ok(Json(collection.to_json()))
} }
@@ -236,7 +254,7 @@ fn post_organization_collections(
fn put_organization_collection_update( fn put_organization_collection_update(
org_id: String, org_id: String,
col_id: String, col_id: String,
headers: AdminHeaders, headers: ManagerHeaders,
data: JsonUpcase<NewCollectionData>, data: JsonUpcase<NewCollectionData>,
conn: DbConn, conn: DbConn,
) -> JsonResult { ) -> JsonResult {
@@ -247,7 +265,7 @@ fn put_organization_collection_update(
fn post_organization_collection_update( fn post_organization_collection_update(
org_id: String, org_id: String,
col_id: String, col_id: String,
_headers: AdminHeaders, _headers: ManagerHeaders,
data: JsonUpcase<NewCollectionData>, data: JsonUpcase<NewCollectionData>,
conn: DbConn, conn: DbConn,
) -> JsonResult { ) -> JsonResult {
@@ -315,7 +333,7 @@ fn post_organization_collection_delete_user(
} }
#[delete("/organizations/<org_id>/collections/<col_id>")] #[delete("/organizations/<org_id>/collections/<col_id>")]
fn delete_organization_collection(org_id: String, col_id: String, _headers: AdminHeaders, conn: DbConn) -> EmptyResult { fn delete_organization_collection(org_id: String, col_id: String, _headers: ManagerHeaders, conn: DbConn) -> EmptyResult {
match Collection::find_by_uuid(&col_id, &conn) { match Collection::find_by_uuid(&col_id, &conn) {
None => err!("Collection not found"), None => err!("Collection not found"),
Some(collection) => { Some(collection) => {
@@ -339,7 +357,7 @@ struct DeleteCollectionData {
fn post_organization_collection_delete( fn post_organization_collection_delete(
org_id: String, org_id: String,
col_id: String, col_id: String,
headers: AdminHeaders, headers: ManagerHeaders,
_data: JsonUpcase<DeleteCollectionData>, _data: JsonUpcase<DeleteCollectionData>,
conn: DbConn, conn: DbConn,
) -> EmptyResult { ) -> EmptyResult {
@@ -347,7 +365,7 @@ fn post_organization_collection_delete(
} }
#[get("/organizations/<org_id>/collections/<coll_id>/details")] #[get("/organizations/<org_id>/collections/<coll_id>/details")]
fn get_org_collection_detail(org_id: String, coll_id: String, headers: AdminHeaders, conn: DbConn) -> JsonResult { fn get_org_collection_detail(org_id: String, coll_id: String, headers: ManagerHeaders, conn: DbConn) -> JsonResult {
match Collection::find_by_uuid_and_user(&coll_id, &headers.user.uuid, &conn) { match Collection::find_by_uuid_and_user(&coll_id, &headers.user.uuid, &conn) {
None => err!("Collection not found"), None => err!("Collection not found"),
Some(collection) => { Some(collection) => {
@@ -361,7 +379,7 @@ fn get_org_collection_detail(org_id: String, coll_id: String, headers: AdminHead
} }
#[get("/organizations/<org_id>/collections/<coll_id>/users")] #[get("/organizations/<org_id>/collections/<coll_id>/users")]
fn get_collection_users(org_id: String, coll_id: String, _headers: AdminHeaders, conn: DbConn) -> JsonResult { fn get_collection_users(org_id: String, coll_id: String, _headers: ManagerHeaders, conn: DbConn) -> JsonResult {
// Get org and collection, check that collection is from org // Get org and collection, check that collection is from org
let collection = match Collection::find_by_uuid_and_org(&coll_id, &org_id, &conn) { let collection = match Collection::find_by_uuid_and_org(&coll_id, &org_id, &conn) {
None => err!("Collection not found in Organization"), None => err!("Collection not found in Organization"),
@@ -374,7 +392,7 @@ fn get_collection_users(org_id: String, coll_id: String, _headers: AdminHeaders,
.map(|col_user| { .map(|col_user| {
UserOrganization::find_by_user_and_org(&col_user.user_uuid, &org_id, &conn) UserOrganization::find_by_user_and_org(&col_user.user_uuid, &org_id, &conn)
.unwrap() .unwrap()
.to_json_collection_user_details(col_user.read_only) .to_json_user_access_restrictions(&col_user)
}) })
.collect(); .collect();
@@ -386,7 +404,7 @@ fn put_collection_users(
org_id: String, org_id: String,
coll_id: String, coll_id: String,
data: JsonUpcaseVec<CollectionData>, data: JsonUpcaseVec<CollectionData>,
_headers: AdminHeaders, _headers: ManagerHeaders,
conn: DbConn, conn: DbConn,
) -> EmptyResult { ) -> EmptyResult {
// Get org and collection, check that collection is from org // Get org and collection, check that collection is from org
@@ -408,7 +426,9 @@ fn put_collection_users(
continue; continue;
} }
CollectionUser::save(&user.user_uuid, &coll_id, d.ReadOnly, &conn)?; CollectionUser::save(&user.user_uuid, &coll_id,
d.ReadOnly, d.HidePasswords,
&conn)?;
} }
Ok(()) Ok(())
@@ -421,30 +441,30 @@ struct OrgIdData {
} }
#[get("/ciphers/organization-details?<data..>")] #[get("/ciphers/organization-details?<data..>")]
fn get_org_details(data: Form<OrgIdData>, headers: Headers, conn: DbConn) -> JsonResult { fn get_org_details(data: Form<OrgIdData>, headers: Headers, conn: DbConn) -> Json<Value> {
let ciphers = Cipher::find_by_org(&data.organization_id, &conn); let ciphers = Cipher::find_by_org(&data.organization_id, &conn);
let ciphers_json: Vec<Value> = ciphers let ciphers_json: Vec<Value> = ciphers
.iter() .iter()
.map(|c| c.to_json(&headers.host, &headers.user.uuid, &conn)) .map(|c| c.to_json(&headers.host, &headers.user.uuid, &conn))
.collect(); .collect();
Ok(Json(json!({ Json(json!({
"Data": ciphers_json, "Data": ciphers_json,
"Object": "list", "Object": "list",
"ContinuationToken": null, "ContinuationToken": null,
}))) }))
} }
#[get("/organizations/<org_id>/users")] #[get("/organizations/<org_id>/users")]
fn get_org_users(org_id: String, _headers: AdminHeaders, conn: DbConn) -> JsonResult { fn get_org_users(org_id: String, _headers: ManagerHeadersLoose, conn: DbConn) -> Json<Value> {
let users = UserOrganization::find_by_org(&org_id, &conn); let users = UserOrganization::find_by_org(&org_id, &conn);
let users_json: Vec<Value> = users.iter().map(|c| c.to_json_user_details(&conn)).collect(); let users_json: Vec<Value> = users.iter().map(|c| c.to_json_user_details(&conn)).collect();
Ok(Json(json!({ Json(json!({
"Data": users_json, "Data": users_json,
"Object": "list", "Object": "list",
"ContinuationToken": null, "ContinuationToken": null,
}))) }))
} }
#[derive(Deserialize)] #[derive(Deserialize)]
@@ -452,6 +472,7 @@ fn get_org_users(org_id: String, _headers: AdminHeaders, conn: DbConn) -> JsonRe
struct CollectionData { struct CollectionData {
Id: String, Id: String,
ReadOnly: bool, ReadOnly: bool,
HidePasswords: bool,
} }
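Per-collection access grants now carry a HidePasswords flag alongside ReadOnly. A self-contained sketch of deserializing such an entry; the struct is renamed here to avoid clashing with the real CollectionData, and the field names are PascalCase because the API goes through the JsonUpcase wrapper:

use serde::Deserialize;
use serde_json::json;

#[derive(Deserialize, Debug)]
#[allow(non_snake_case)]
struct CollectionAccessSketch {
    Id: String,
    ReadOnly: bool,
    HidePasswords: bool,
}

fn main() {
    let value = json!({ "Id": "collection-uuid", "ReadOnly": true, "HidePasswords": true });
    let access: CollectionAccessSketch = serde_json::from_value(value).unwrap();
    // Both flags end up in the extended CollectionUser::save(.., read_only, hide_passwords, ..) call.
    assert!(access.ReadOnly && access.HidePasswords);
    println!("{}", access.Id);
}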
#[derive(Deserialize)] #[derive(Deserialize)]
@@ -485,7 +506,11 @@ fn send_invite(org_id: String, data: JsonUpcase<InviteData>, headers: AdminHeade
let user = match User::find_by_mail(&email, &conn) { let user = match User::find_by_mail(&email, &conn) {
None => { None => {
if !CONFIG.invitations_allowed() { if !CONFIG.invitations_allowed() {
err!(format!("User email does not exist: {}", email)) err!(format!("User does not exist: {}", email))
}
if !CONFIG.is_email_domain_allowed(&email) {
err!("Email domain not eligible for invitations")
} }
if !CONFIG.mail_enabled() { if !CONFIG.mail_enabled() {
@@ -519,7 +544,9 @@ fn send_invite(org_id: String, data: JsonUpcase<InviteData>, headers: AdminHeade
match Collection::find_by_uuid_and_org(&col.Id, &org_id, &conn) { match Collection::find_by_uuid_and_org(&col.Id, &org_id, &conn) {
None => err!("Collection not found in Organization"), None => err!("Collection not found in Organization"),
Some(collection) => { Some(collection) => {
CollectionUser::save(&user.uuid, &collection.uuid, col.ReadOnly, &conn)?; CollectionUser::save(&user.uuid, &collection.uuid,
col.ReadOnly, col.HidePasswords,
&conn)?;
} }
} }
} }
@@ -628,7 +655,7 @@ fn accept_invite(_org_id: String, _org_user_id: String, data: JsonUpcase<AcceptD
} }
if CONFIG.mail_enabled() { if CONFIG.mail_enabled() {
let mut org_name = String::from("bitwarden_rs"); let mut org_name = CONFIG.invitation_org_name();
if let Some(org_id) = &claims.org_id { if let Some(org_id) = &claims.org_id {
org_name = match Organization::find_by_uuid(&org_id, &conn) { org_name = match Organization::find_by_uuid(&org_id, &conn) {
Some(org) => org.name, Some(org) => org.name,
@@ -774,7 +801,9 @@ fn edit_user(
match Collection::find_by_uuid_and_org(&col.Id, &org_id, &conn) { match Collection::find_by_uuid_and_org(&col.Id, &org_id, &conn) {
None => err!("Collection not found in Organization"), None => err!("Collection not found in Organization"),
Some(collection) => { Some(collection) => {
CollectionUser::save(&user_to_edit.user_uuid, &collection.uuid, col.ReadOnly, &conn)?; CollectionUser::save(&user_to_edit.user_uuid, &collection.uuid,
col.ReadOnly, col.HidePasswords,
&conn)?;
} }
} }
} }
@@ -901,15 +930,15 @@ fn post_org_import(
} }
#[get("/organizations/<org_id>/policies")] #[get("/organizations/<org_id>/policies")]
fn list_policies(org_id: String, _headers: AdminHeaders, conn: DbConn) -> JsonResult { fn list_policies(org_id: String, _headers: AdminHeaders, conn: DbConn) -> Json<Value> {
let policies = OrgPolicy::find_by_org(&org_id, &conn); let policies = OrgPolicy::find_by_org(&org_id, &conn);
let policies_json: Vec<Value> = policies.iter().map(OrgPolicy::to_json).collect(); let policies_json: Vec<Value> = policies.iter().map(OrgPolicy::to_json).collect();
Ok(Json(json!({ Json(json!({
"Data": policies_json, "Data": policies_json,
"Object": "list", "Object": "list",
"ContinuationToken": null "ContinuationToken": null
}))) }))
} }
#[get("/organizations/<org_id>/policies/token?<token>")] #[get("/organizations/<org_id>/policies/token?<token>")]
@@ -924,7 +953,7 @@ fn list_policies_token(org_id: String, token: String, conn: DbConn) -> JsonResul
if invite_org_id != org_id { if invite_org_id != org_id {
err!("Token doesn't match request organization"); err!("Token doesn't match request organization");
} }
// TODO: We receive the invite token as ?token=<>, validate it contains the org id // TODO: We receive the invite token as ?token=<>, validate it contains the org id
let policies = OrgPolicy::find_by_org(&org_id, &conn); let policies = OrgPolicy::find_by_org(&org_id, &conn);
let policies_json: Vec<Value> = policies.iter().map(OrgPolicy::to_json).collect(); let policies_json: Vec<Value> = policies.iter().map(OrgPolicy::to_json).collect();
@@ -940,7 +969,7 @@ fn list_policies_token(org_id: String, token: String, conn: DbConn) -> JsonResul
fn get_policy(org_id: String, pol_type: i32, _headers: AdminHeaders, conn: DbConn) -> JsonResult { fn get_policy(org_id: String, pol_type: i32, _headers: AdminHeaders, conn: DbConn) -> JsonResult {
let pol_type_enum = match OrgPolicyType::from_i32(pol_type) { let pol_type_enum = match OrgPolicyType::from_i32(pol_type) {
Some(pt) => pt, Some(pt) => pt,
None => err!("Invalid policy type"), None => err!("Invalid or unsupported policy type"),
}; };
let policy = match OrgPolicy::find_by_org_and_type(&org_id, pol_type, &conn) { let policy = match OrgPolicy::find_by_org_and_type(&org_id, pol_type, &conn) {
@@ -978,4 +1007,170 @@ fn put_policy(org_id: String, pol_type: i32, data: Json<PolicyData>, _headers: A
policy.save(&conn)?; policy.save(&conn)?;
Ok(Json(policy.to_json())) Ok(Json(policy.to_json()))
} }
#[allow(unused_variables)]
#[get("/organizations/<org_id>/tax")]
fn get_organization_tax(org_id: String, _headers: Headers, _conn: DbConn) -> EmptyResult {
// Prevent a 404 error, which also causes Javascript errors.
err!("Only allowed when not self hosted.")
}
#[get("/plans")]
fn get_plans(_headers: Headers, _conn: DbConn) -> Json<Value> {
Json(json!({
"Object": "list",
"Data": [
{
"Object": "plan",
"Type": 0,
"Product": 0,
"Name": "Free",
"IsAnnual": false,
"NameLocalizationKey": "planNameFree",
"DescriptionLocalizationKey": "planDescFree",
"CanBeUsedByBusiness": false,
"BaseSeats": 2,
"BaseStorageGb": null,
"MaxCollections": 2,
"MaxUsers": 2,
"HasAdditionalSeatsOption": false,
"MaxAdditionalSeats": null,
"HasAdditionalStorageOption": false,
"MaxAdditionalStorage": null,
"HasPremiumAccessOption": false,
"TrialPeriodDays": null,
"HasSelfHost": false,
"HasPolicies": false,
"HasGroups": false,
"HasDirectory": false,
"HasEvents": false,
"HasTotp": false,
"Has2fa": false,
"HasApi": false,
"HasSso": false,
"UsersGetPremium": false,
"UpgradeSortOrder": -1,
"DisplaySortOrder": -1,
"LegacyYear": null,
"Disabled": false,
"StripePlanId": null,
"StripeSeatPlanId": null,
"StripeStoragePlanId": null,
"StripePremiumAccessPlanId": null,
"BasePrice": 0.0,
"SeatPrice": 0.0,
"AdditionalStoragePricePerGb": 0.0,
"PremiumAccessOptionPrice": 0.0
}
],
"ContinuationToken": null
}))
}
#[get("/plans/sales-tax-rates")]
fn get_plans_tax_rates(_headers: Headers, _conn: DbConn) -> Json<Value> {
// Prevent a 404 error, which also causes Javascript errors.
Json(json!({
"Object": "list",
"Data": [],
"ContinuationToken": null
}))
}
#[derive(Deserialize, Debug)]
#[allow(non_snake_case)]
struct OrgImportGroupData {
Name: String, // "GroupName"
ExternalId: String, // "cn=GroupName,ou=Groups,dc=example,dc=com"
Users: Vec<String>, // ["uid=user,ou=People,dc=example,dc=com"]
}
#[derive(Deserialize, Debug)]
#[allow(non_snake_case)]
struct OrgImportUserData {
Email: String, // "user@maildomain.net"
ExternalId: String, // "uid=user,ou=People,dc=example,dc=com"
Deleted: bool,
}
#[derive(Deserialize, Debug)]
#[allow(non_snake_case)]
struct OrgImportData {
Groups: Vec<OrgImportGroupData>,
OverwriteExisting: bool,
Users: Vec<OrgImportUserData>,
}
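For reference, a minimal sketch (an editor's illustration, not part of the diff) of the JSON body that the import endpoint below deserializes into OrgImportData; the DNs and addresses are placeholders taken from the field comments above:
fn example_import_body() -> serde_json::Value {
    // Illustrative payload only; field names are upper-cased because the handler uses JsonUpcase.
    serde_json::json!({
        "Groups": [{
            "Name": "GroupName",
            "ExternalId": "cn=GroupName,ou=Groups,dc=example,dc=com",
            "Users": ["uid=user,ou=People,dc=example,dc=com"]
        }],
        "Users": [{
            "Email": "user@maildomain.net",
            "ExternalId": "uid=user,ou=People,dc=example,dc=com",
            "Deleted": false
        }],
        "OverwriteExisting": false
    })
}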
#[post("/organizations/<org_id>/import", data = "<data>")]
fn import(org_id: String, data: JsonUpcase<OrgImportData>, headers: Headers, conn: DbConn) -> EmptyResult {
let data = data.into_inner().data;
// TODO: Currently we aren't storing the externalId's anywhere, so we also don't have a way
// to differentiate between auto-imported users and manually added ones.
// This means that this endpoint can end up removing users that were added manually by an admin,
// as opposed to upstream which only removes auto-imported users.
// User needs to be admin or owner to use the Directory Connector
match UserOrganization::find_by_user_and_org(&headers.user.uuid, &org_id, &conn) {
Some(user_org) if user_org.atype >= UserOrgType::Admin => { /* Okay, nothing to do */ }
Some(_) => err!("User has insufficient permissions to use Directory Connector"),
None => err!("User not part of organization"),
};
for user_data in &data.Users {
if user_data.Deleted {
// If user is marked for deletion and it exists, delete it
if let Some(user_org) = UserOrganization::find_by_email_and_org(&user_data.Email, &org_id, &conn) {
user_org.delete(&conn)?;
}
// If the user is not yet part of the organization but does exist, add them
} else if UserOrganization::find_by_email_and_org(&user_data.Email, &org_id, &conn).is_none() {
if let Some (user) = User::find_by_mail(&user_data.Email, &conn) {
let user_org_status = if CONFIG.mail_enabled() {
UserOrgStatus::Invited as i32
} else {
UserOrgStatus::Accepted as i32 // Automatically mark user as accepted if no email invites
};
let mut new_org_user = UserOrganization::new(user.uuid.clone(), org_id.clone());
new_org_user.access_all = false;
new_org_user.atype = UserOrgType::User as i32;
new_org_user.status = user_org_status;
new_org_user.save(&conn)?;
if CONFIG.mail_enabled() {
let org_name = match Organization::find_by_uuid(&org_id, &conn) {
Some(org) => org.name,
None => err!("Error looking up organization"),
};
mail::send_invite(
&user_data.Email,
&user.uuid,
Some(org_id.clone()),
Some(new_org_user.uuid),
&org_name,
Some(headers.user.email.clone()),
)?;
}
}
}
}
// If this flag is enabled, any user that isn't provided in the Users list will be removed (by default they will be kept unless they have Deleted == true)
if data.OverwriteExisting {
for user_org in UserOrganization::find_by_org_and_type(&org_id, UserOrgType::User as i32, &conn) {
if let Some (user_email) = User::find_by_uuid(&user_org.user_uuid, &conn).map(|u| u.email) {
if !data.Users.iter().any(|u| u.Email == user_email) {
user_org.delete(&conn)?;
}
}
}
}
Ok(())
}

src/api/core/sends.rs (new file, 416 lines)

@@ -0,0 +1,416 @@
use std::{io::Read, path::Path};
use chrono::{DateTime, Duration, Utc};
use multipart::server::{save::SavedData, Multipart, SaveResult};
use rocket::{http::ContentType, Data};
use rocket_contrib::json::Json;
use serde_json::Value;
use crate::{
api::{ApiResult, EmptyResult, JsonResult, JsonUpcase, Notify, UpdateType},
auth::{Headers, Host},
db::{models::*, DbConn},
CONFIG,
};
const SEND_INACCESSIBLE_MSG: &str = "Send does not exist or is no longer available";
pub fn routes() -> Vec<rocket::Route> {
routes![
post_send,
post_send_file,
post_access,
post_access_file,
put_send,
delete_send,
put_remove_password
]
}
pub fn start_send_deletion_scheduler(pool: crate::db::DbPool) {
std::thread::spawn(move || {
loop {
if let Ok(conn) = pool.get() {
info!("Initiating send deletion");
for send in Send::find_all(&conn) {
if chrono::Utc::now().naive_utc() >= send.deletion_date {
send.delete(&conn).ok();
}
}
}
std::thread::sleep(std::time::Duration::from_secs(3600));
}
});
}
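A short usage sketch for context: this function is meant to be called once at startup with the database pool, after which the loop above sweeps expired Sends every hour. The wrapper below is an assumption; the actual call site in main.rs is not part of this diff.
// Assumed startup wiring (hypothetical helper, not in this diff).
fn start_background_jobs(pool: crate::db::DbPool) {
    // Spawns the hourly sweep shown above; it deletes Sends whose deletion_date has passed.
    crate::api::core::sends::start_send_deletion_scheduler(pool);
}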
#[derive(Deserialize)]
#[allow(non_snake_case)]
pub struct SendData {
pub Type: i32,
pub Key: String,
pub Password: Option<String>,
pub MaxAccessCount: Option<i32>,
pub ExpirationDate: Option<DateTime<Utc>>,
pub DeletionDate: DateTime<Utc>,
pub Disabled: bool,
// Data field
pub Name: String,
pub Notes: Option<String>,
pub Text: Option<Value>,
pub File: Option<Value>,
}
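To make the expected shape concrete, a hedged example of a text-Send body as this struct would receive it through JsonUpcase; the Type value and the inner Text object are assumptions about the client payload, and all values are placeholders.
fn example_text_send_body() -> serde_json::Value {
    // Illustrative only: Key/Name/Text carry client-side encrypted blobs in practice,
    // and the inner Text object's fields ("Text"/"Hidden") are an assumption about the client payload.
    serde_json::json!({
        "Type": 0,                     // assumed to correspond to SendType::Text
        "Name": "<encrypted name>",
        "Notes": null,
        "Key": "<encrypted key>",
        "Password": null,
        "MaxAccessCount": 5,
        "ExpirationDate": "2021-04-01T00:00:00Z",
        "DeletionDate": "2021-04-07T00:00:00Z",
        "Disabled": false,
        "Text": { "Text": "<encrypted text>", "Hidden": false },
        "File": null
    })
}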
/// Enforces the `Disable Send` policy. A non-owner/admin user belonging to
/// an org with this policy enabled isn't allowed to create new Sends or
/// modify existing ones, but is allowed to delete them.
///
/// Ref: https://bitwarden.com/help/article/policies/#disable-send
fn enforce_disable_send_policy(headers: &Headers, conn: &DbConn) -> EmptyResult {
let user_uuid = &headers.user.uuid;
let policy_type = OrgPolicyType::DisableSend;
if OrgPolicy::is_applicable_to_user(user_uuid, policy_type, conn) {
err!("Due to an Enterprise Policy, you are only able to delete an existing Send.")
}
Ok(())
}
fn create_send(data: SendData, user_uuid: String) -> ApiResult<Send> {
let data_val = if data.Type == SendType::Text as i32 {
data.Text
} else if data.Type == SendType::File as i32 {
data.File
} else {
err!("Invalid Send type")
};
let data_str = if let Some(mut d) = data_val {
d.as_object_mut().and_then(|o| o.remove("Response"));
serde_json::to_string(&d)?
} else {
err!("Send data not provided");
};
if data.DeletionDate > Utc::now() + Duration::days(31) {
err!(
"You cannot have a Send with a deletion date that far into the future. Adjust the Deletion Date to a value less than 31 days from now and try again."
);
}
let mut send = Send::new(data.Type, data.Name, data_str, data.Key, data.DeletionDate.naive_utc());
send.user_uuid = Some(user_uuid);
send.notes = data.Notes;
send.max_access_count = data.MaxAccessCount;
send.expiration_date = data.ExpirationDate.map(|d| d.naive_utc());
send.disabled = data.Disabled;
send.atype = data.Type;
send.set_password(data.Password.as_deref());
Ok(send)
}
#[post("/sends", data = "<data>")]
fn post_send(data: JsonUpcase<SendData>, headers: Headers, conn: DbConn, nt: Notify) -> JsonResult {
enforce_disable_send_policy(&headers, &conn)?;
let data: SendData = data.into_inner().data;
if data.Type == SendType::File as i32 {
err!("File sends should use /api/sends/file")
}
let mut send = create_send(data, headers.user.uuid.clone())?;
send.save(&conn)?;
nt.send_user_update(UpdateType::SyncSendCreate, &headers.user);
Ok(Json(send.to_json()))
}
#[post("/sends/file", format = "multipart/form-data", data = "<data>")]
fn post_send_file(data: Data, content_type: &ContentType, headers: Headers, conn: DbConn, nt: Notify) -> JsonResult {
enforce_disable_send_policy(&headers, &conn)?;
let boundary = content_type.params().next().expect("No boundary provided").1;
let mut mpart = Multipart::with_body(data.open(), boundary);
// First entry is the SendData JSON
let mut model_entry = match mpart.read_entry()? {
Some(e) if &*e.headers.name == "model" => e,
Some(_) => err!("Invalid entry name"),
None => err!("No model entry present"),
};
let mut buf = String::new();
model_entry.data.read_to_string(&mut buf)?;
let data = serde_json::from_str::<crate::util::UpCase<SendData>>(&buf)?;
// Cap the upload size with roughly 10% of headroom to avoid issues:
// 110 MiB = 110 * 1024 * 1024 = 115_343_360 bytes
const SIZE_110_MB: u64 = 115_343_360;
let size_limit = match CONFIG.user_attachment_limit() {
Some(0) => err!("File uploads are disabled"),
Some(limit_kb) => {
let left = (limit_kb * 1024) - Attachment::size_by_user(&headers.user.uuid, &conn);
if left <= 0 {
err!("Attachment size limit reached! Delete some files to free up space")
}
std::cmp::Ord::max(left as u64, SIZE_110_MB)
}
None => SIZE_110_MB,
};
// Create the Send
let mut send = create_send(data.data, headers.user.uuid.clone())?;
let file_id: String = data_encoding::HEXLOWER.encode(&crate::crypto::get_random(vec![0; 32]));
if send.atype != SendType::File as i32 {
err!("Send content is not a file");
}
let file_path = Path::new(&CONFIG.sends_folder()).join(&send.uuid).join(&file_id);
// Read the data entry and save the file
let mut data_entry = match mpart.read_entry()? {
Some(e) if &*e.headers.name == "data" => e,
Some(_) => err!("Invalid entry name"),
None => err!("No model entry present"),
};
let size = match data_entry
.data
.save()
.memory_threshold(0)
.size_limit(size_limit)
.with_path(&file_path)
{
SaveResult::Full(SavedData::File(_, size)) => size as i32,
SaveResult::Full(other) => {
std::fs::remove_file(&file_path).ok();
err!(format!("Attachment is not a file: {:?}", other));
}
SaveResult::Partial(_, reason) => {
std::fs::remove_file(&file_path).ok();
err!(format!("Attachment size limit exceeded with this file: {:?}", reason));
}
SaveResult::Error(e) => {
std::fs::remove_file(&file_path).ok();
err!(format!("Error: {:?}", e));
}
};
// Set ID and sizes
let mut data_value: Value = serde_json::from_str(&send.data)?;
if let Some(o) = data_value.as_object_mut() {
o.insert(String::from("Id"), Value::String(file_id));
o.insert(String::from("Size"), Value::Number(size.into()));
o.insert(
String::from("SizeName"),
Value::String(crate::util::get_display_size(size)),
);
}
send.data = serde_json::to_string(&data_value)?;
// Save the changes in the database
send.save(&conn)?;
nt.send_user_update(UpdateType::SyncSendCreate, &headers.user);
Ok(Json(send.to_json()))
}
#[derive(Deserialize)]
#[allow(non_snake_case)]
pub struct SendAccessData {
pub Password: Option<String>,
}
#[post("/sends/access/<access_id>", data = "<data>")]
fn post_access(access_id: String, data: JsonUpcase<SendAccessData>, conn: DbConn) -> JsonResult {
let mut send = match Send::find_by_access_id(&access_id, &conn) {
Some(s) => s,
None => err_code!(SEND_INACCESSIBLE_MSG, 404),
};
if let Some(max_access_count) = send.max_access_count {
if send.access_count >= max_access_count {
err_code!(SEND_INACCESSIBLE_MSG, 404);
}
}
if let Some(expiration) = send.expiration_date {
if Utc::now().naive_utc() >= expiration {
err_code!(SEND_INACCESSIBLE_MSG, 404)
}
}
if Utc::now().naive_utc() >= send.deletion_date {
err_code!(SEND_INACCESSIBLE_MSG, 404)
}
if send.disabled {
err_code!(SEND_INACCESSIBLE_MSG, 404)
}
if send.password_hash.is_some() {
match data.into_inner().data.Password {
Some(ref p) if send.check_password(p) => { /* Nothing to do here */ }
Some(_) => err!("Invalid password."),
None => err_code!("Password not provided", 401),
}
}
// For file Sends, the access count is incremented when the file itself is downloaded
if send.atype == SendType::Text as i32 {
send.access_count += 1;
}
send.save(&conn)?;
Ok(Json(send.to_json_access()))
}
#[post("/sends/<send_id>/access/file/<file_id>", data = "<data>")]
fn post_access_file(
send_id: String,
file_id: String,
data: JsonUpcase<SendAccessData>,
host: Host,
conn: DbConn,
) -> JsonResult {
let mut send = match Send::find_by_uuid(&send_id, &conn) {
Some(s) => s,
None => err_code!(SEND_INACCESSIBLE_MSG, 404),
};
if let Some(max_access_count) = send.max_access_count {
if send.access_count >= max_access_count {
err_code!(SEND_INACCESSIBLE_MSG, 404)
}
}
if let Some(expiration) = send.expiration_date {
if Utc::now().naive_utc() >= expiration {
err_code!(SEND_INACCESSIBLE_MSG, 404)
}
}
if Utc::now().naive_utc() >= send.deletion_date {
err_code!(SEND_INACCESSIBLE_MSG, 404)
}
if send.disabled {
err_code!(SEND_INACCESSIBLE_MSG, 404)
}
if send.password_hash.is_some() {
match data.into_inner().data.Password {
Some(ref p) if send.check_password(p) => { /* Nothing to do here */ }
Some(_) => err!("Invalid password."),
None => err_code!("Password not provided", 401),
}
}
send.access_count += 1;
send.save(&conn)?;
Ok(Json(json!({
"Object": "send-fileDownload",
"Id": file_id,
"Url": format!("{}/sends/{}/{}", &host.host, send_id, file_id)
})))
}
#[put("/sends/<id>", data = "<data>")]
fn put_send(id: String, data: JsonUpcase<SendData>, headers: Headers, conn: DbConn, nt: Notify) -> JsonResult {
enforce_disable_send_policy(&headers, &conn)?;
let data: SendData = data.into_inner().data;
let mut send = match Send::find_by_uuid(&id, &conn) {
Some(s) => s,
None => err!("Send not found"),
};
if send.user_uuid.as_ref() != Some(&headers.user.uuid) {
err!("Send is not owned by user")
}
if send.atype != data.Type {
err!("Sends can't change type")
}
// When updating a file Send, we receive nulls in the File field, as it's immutable,
// so we only need to update the data field in the Text case
if data.Type == SendType::Text as i32 {
let data_str = if let Some(mut d) = data.Text {
d.as_object_mut().and_then(|d| d.remove("Response"));
serde_json::to_string(&d)?
} else {
err!("Send data not provided");
};
send.data = data_str;
}
if data.DeletionDate > Utc::now() + Duration::days(31) {
err!(
"You cannot have a Send with a deletion date that far into the future. Adjust the Deletion Date to a value less than 31 days from now and try again."
);
}
send.name = data.Name;
send.akey = data.Key;
send.deletion_date = data.DeletionDate.naive_utc();
send.notes = data.Notes;
send.max_access_count = data.MaxAccessCount;
send.expiration_date = data.ExpirationDate.map(|d| d.naive_utc());
send.disabled = data.Disabled;
// Only change the value if it's present
if let Some(password) = data.Password {
send.set_password(Some(&password));
}
send.save(&conn)?;
nt.send_user_update(UpdateType::SyncSendUpdate, &headers.user);
Ok(Json(send.to_json()))
}
#[delete("/sends/<id>")]
fn delete_send(id: String, headers: Headers, conn: DbConn, nt: Notify) -> EmptyResult {
let send = match Send::find_by_uuid(&id, &conn) {
Some(s) => s,
None => err!("Send not found"),
};
if send.user_uuid.as_ref() != Some(&headers.user.uuid) {
err!("Send is not owned by user")
}
send.delete(&conn)?;
nt.send_user_update(UpdateType::SyncSendDelete, &headers.user);
Ok(())
}
#[put("/sends/<id>/remove-password")]
fn put_remove_password(id: String, headers: Headers, conn: DbConn, nt: Notify) -> JsonResult {
enforce_disable_send_policy(&headers, &conn)?;
let mut send = match Send::find_by_uuid(&id, &conn) {
Some(s) => s,
None => err!("Send not found"),
};
if send.user_uuid.as_ref() != Some(&headers.user.uuid) {
err!("Send is not owned by user")
}
send.set_password(None);
send.save(&conn)?;
nt.send_user_update(UpdateType::SyncSendUpdate, &headers.user);
Ok(Json(send.to_json()))
}

View File

@@ -2,13 +2,16 @@ use data_encoding::BASE32;
use rocket::Route; use rocket::Route;
use rocket_contrib::json::Json; use rocket_contrib::json::Json;
use crate::api::core::two_factor::_generate_recover_code; use crate::{
use crate::api::{EmptyResult, JsonResult, JsonUpcase, NumberOrString, PasswordData}; api::{
use crate::auth::Headers; core::two_factor::_generate_recover_code, EmptyResult, JsonResult, JsonUpcase, NumberOrString, PasswordData,
use crate::crypto; },
use crate::db::{ auth::{ClientIp, Headers},
models::{TwoFactor, TwoFactorType}, crypto,
DbConn, db::{
models::{TwoFactor, TwoFactorType},
DbConn,
},
}; };
pub use crate::config::CONFIG; pub use crate::config::CONFIG;
@@ -20,6 +23,7 @@ pub fn routes() -> Vec<Route> {
activate_authenticator_put, activate_authenticator_put,
] ]
} }
#[post("/two-factor/get-authenticator", data = "<data>")] #[post("/two-factor/get-authenticator", data = "<data>")]
fn generate_authenticator(data: JsonUpcase<PasswordData>, headers: Headers, conn: DbConn) -> JsonResult { fn generate_authenticator(data: JsonUpcase<PasswordData>, headers: Headers, conn: DbConn) -> JsonResult {
let data: PasswordData = data.into_inner().data; let data: PasswordData = data.into_inner().data;
@@ -53,7 +57,12 @@ struct EnableAuthenticatorData {
} }
#[post("/two-factor/authenticator", data = "<data>")] #[post("/two-factor/authenticator", data = "<data>")]
fn activate_authenticator(data: JsonUpcase<EnableAuthenticatorData>, headers: Headers, conn: DbConn) -> JsonResult { fn activate_authenticator(
data: JsonUpcase<EnableAuthenticatorData>,
headers: Headers,
ip: ClientIp,
conn: DbConn,
) -> JsonResult {
let data: EnableAuthenticatorData = data.into_inner().data; let data: EnableAuthenticatorData = data.into_inner().data;
let password_hash = data.MasterPasswordHash; let password_hash = data.MasterPasswordHash;
let key = data.Key; let key = data.Key;
@@ -76,7 +85,7 @@ fn activate_authenticator(data: JsonUpcase<EnableAuthenticatorData>, headers: He
} }
// Validate the token provided with the key, and save new twofactor // Validate the token provided with the key, and save new twofactor
validate_totp_code(&user.uuid, token, &key.to_uppercase(), &conn)?; validate_totp_code(&user.uuid, token, &key.to_uppercase(), &ip, &conn)?;
_generate_recover_code(&mut user, &conn); _generate_recover_code(&mut user, &conn);
@@ -88,20 +97,31 @@ fn activate_authenticator(data: JsonUpcase<EnableAuthenticatorData>, headers: He
} }
#[put("/two-factor/authenticator", data = "<data>")] #[put("/two-factor/authenticator", data = "<data>")]
fn activate_authenticator_put(data: JsonUpcase<EnableAuthenticatorData>, headers: Headers, conn: DbConn) -> JsonResult { fn activate_authenticator_put(
activate_authenticator(data, headers, conn) data: JsonUpcase<EnableAuthenticatorData>,
headers: Headers,
ip: ClientIp,
conn: DbConn,
) -> JsonResult {
activate_authenticator(data, headers, ip, conn)
} }
pub fn validate_totp_code_str(user_uuid: &str, totp_code: &str, secret: &str, conn: &DbConn) -> EmptyResult { pub fn validate_totp_code_str(
user_uuid: &str,
totp_code: &str,
secret: &str,
ip: &ClientIp,
conn: &DbConn,
) -> EmptyResult {
let totp_code: u64 = match totp_code.parse() { let totp_code: u64 = match totp_code.parse() {
Ok(code) => code, Ok(code) => code,
_ => err!("TOTP code is not a number"), _ => err!("TOTP code is not a number"),
}; };
validate_totp_code(user_uuid, totp_code, secret, &conn) validate_totp_code(user_uuid, totp_code, secret, ip, &conn)
} }
pub fn validate_totp_code(user_uuid: &str, totp_code: u64, secret: &str, conn: &DbConn) -> EmptyResult { pub fn validate_totp_code(user_uuid: &str, totp_code: u64, secret: &str, ip: &ClientIp, conn: &DbConn) -> EmptyResult {
use oath::{totp_raw_custom_time, HashType}; use oath::{totp_raw_custom_time, HashType};
let decoded_secret = match BASE32.decode(secret.as_bytes()) { let decoded_secret = match BASE32.decode(secret.as_bytes()) {
@@ -143,11 +163,22 @@ pub fn validate_totp_code(user_uuid: &str, totp_code: u64, secret: &str, conn: &
twofactor.save(&conn)?; twofactor.save(&conn)?;
return Ok(()); return Ok(());
} else if generated == totp_code && time_step <= twofactor.last_used as i64 { } else if generated == totp_code && time_step <= twofactor.last_used as i64 {
warn!("This or a TOTP code within {} steps back and forward has already been used!", steps); warn!(
err!(format!("Invalid TOTP code! Server time: {}", current_time.format("%F %T UTC"))); "This or a TOTP code within {} steps back and forward has already been used!",
steps
);
err!(format!(
"Invalid TOTP code! Server time: {} IP: {}",
current_time.format("%F %T UTC"),
ip.ip
));
} }
} }
// Else no valid code received, deny access // Else no valid code received, deny access
err!(format!("Invalid TOTP code! Server time: {}", current_time.format("%F %T UTC"))); err!(format!(
"Invalid TOTP code! Server time: {} IP: {}",
current_time.format("%F %T UTC"),
ip.ip
));
} }
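The `time_step <= twofactor.last_used` branch above is replay protection: a code that matches is still rejected unless its time step is strictly newer than the last step already consumed for the user. A standalone sketch of that rule (names and values are illustrative, not part of this diff):
/// A freshly matched TOTP code may only be accepted when its (usually 30-second)
/// time step is newer than the last step successfully used by this user.
fn totp_step_is_fresh(current_step: i64, last_used_step: i64) -> bool {
    current_step > last_used_step
}

// Example: if step 54_321_000 was already consumed, a code from that same step is
// rejected even though it still verifies cryptographically.
fn main() {
    assert!(!totp_step_is_fresh(54_321_000, 54_321_000));
    assert!(totp_step_is_fresh(54_321_001, 54_321_000));
}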

View File

@@ -2,18 +2,18 @@ use chrono::Utc;
use data_encoding::BASE64; use data_encoding::BASE64;
use rocket::Route; use rocket::Route;
use rocket_contrib::json::Json; use rocket_contrib::json::Json;
use serde_json;
use crate::api::core::two_factor::_generate_recover_code; use crate::{
use crate::api::{ApiResult, EmptyResult, JsonResult, JsonUpcase, PasswordData}; api::{core::two_factor::_generate_recover_code, ApiResult, EmptyResult, JsonResult, JsonUpcase, PasswordData},
use crate::auth::Headers; auth::Headers,
use crate::crypto; crypto,
use crate::db::{ db::{
models::{TwoFactor, TwoFactorType, User}, models::{TwoFactor, TwoFactorType, User},
DbConn, DbConn,
},
error::MapResult,
CONFIG,
}; };
use crate::error::MapResult;
use crate::CONFIG;
pub fn routes() -> Vec<Route> { pub fn routes() -> Vec<Route> {
routes![get_duo, activate_duo, activate_duo_put,] routes![get_duo, activate_duo, activate_duo_put,]
@@ -21,9 +21,9 @@ pub fn routes() -> Vec<Route> {
#[derive(Serialize, Deserialize)] #[derive(Serialize, Deserialize)]
struct DuoData { struct DuoData {
host: String, host: String, // Duo API hostname
ik: String, ik: String, // integration key
sk: String, sk: String, // secret key
} }
impl DuoData { impl DuoData {
@@ -187,9 +187,10 @@ fn activate_duo_put(data: JsonUpcase<EnableDuoData>, headers: Headers, conn: DbC
fn duo_api_request(method: &str, path: &str, params: &str, data: &DuoData) -> EmptyResult { fn duo_api_request(method: &str, path: &str, params: &str, data: &DuoData) -> EmptyResult {
const AGENT: &str = "bitwarden_rs:Duo/1.0 (Rust)"; const AGENT: &str = "bitwarden_rs:Duo/1.0 (Rust)";
use reqwest::{header::*, Method, blocking::Client}; use reqwest::{blocking::Client, header::*, Method};
use std::str::FromStr; use std::str::FromStr;
// https://duo.com/docs/authapi#api-details
let url = format!("https://{}{}", &data.host, path); let url = format!("https://{}{}", &data.host, path);
let date = Utc::now().to_rfc2822(); let date = Utc::now().to_rfc2822();
let username = &data.ik; let username = &data.ik;
@@ -268,6 +269,10 @@ fn sign_duo_values(key: &str, email: &str, ikey: &str, prefix: &str, expire: i64
} }
pub fn validate_duo_login(email: &str, response: &str, conn: &DbConn) -> EmptyResult { pub fn validate_duo_login(email: &str, response: &str, conn: &DbConn) -> EmptyResult {
// email is as entered by the user, so it needs to be normalized before
// comparison with auth_user below.
let email = &email.to_lowercase();
let split: Vec<&str> = response.split(':').collect(); let split: Vec<&str> = response.split(':').collect();
if split.len() != 2 { if split.len() != 2 {
err!("Invalid response length"); err!("Invalid response length");

View File

@@ -1,21 +1,18 @@
use chrono::{Duration, NaiveDateTime, Utc};
use rocket::Route; use rocket::Route;
use rocket_contrib::json::Json; use rocket_contrib::json::Json;
use serde_json;
use crate::api::core::two_factor::_generate_recover_code; use crate::{
use crate::api::{EmptyResult, JsonResult, JsonUpcase, PasswordData}; api::{core::two_factor::_generate_recover_code, EmptyResult, JsonResult, JsonUpcase, PasswordData},
use crate::auth::Headers; auth::Headers,
use crate::crypto; crypto,
use crate::db::{ db::{
models::{TwoFactor, TwoFactorType}, models::{TwoFactor, TwoFactorType},
DbConn, DbConn,
},
error::{Error, MapResult},
mail, CONFIG,
}; };
use crate::error::Error;
use crate::mail;
use crate::CONFIG;
use chrono::{Duration, NaiveDateTime, Utc};
use std::ops::Add;
pub fn routes() -> Vec<Route> { pub fn routes() -> Vec<Route> {
routes![get_email, send_email_login, send_email, email,] routes![get_email, send_email_login, send_email, email,]
@@ -59,7 +56,7 @@ fn send_email_login(data: JsonUpcase<SendEmailLoginData>, conn: DbConn) -> Empty
/// Generate the token, save the data for later verification and send email to user /// Generate the token, save the data for later verification and send email to user
pub fn send_token(user_uuid: &str, conn: &DbConn) -> EmptyResult { pub fn send_token(user_uuid: &str, conn: &DbConn) -> EmptyResult {
let type_ = TwoFactorType::Email as i32; let type_ = TwoFactorType::Email as i32;
let mut twofactor = TwoFactor::find_by_user_and_type(user_uuid, type_, &conn)?; let mut twofactor = TwoFactor::find_by_user_and_type(user_uuid, type_, &conn).map_res("Two factor not found")?;
let generated_token = crypto::generate_token(CONFIG.email_token_size())?; let generated_token = crypto::generate_token(CONFIG.email_token_size())?;
@@ -68,7 +65,7 @@ pub fn send_token(user_uuid: &str, conn: &DbConn) -> EmptyResult {
twofactor.data = twofactor_data.to_json(); twofactor.data = twofactor_data.to_json();
twofactor.save(&conn)?; twofactor.save(&conn)?;
mail::send_token(&twofactor_data.email, &twofactor_data.last_token?)?; mail::send_token(&twofactor_data.email, &twofactor_data.last_token.map_res("Token is empty")?)?;
Ok(()) Ok(())
} }
@@ -135,7 +132,7 @@ fn send_email(data: JsonUpcase<SendEmailData>, headers: Headers, conn: DbConn) -
); );
twofactor.save(&conn)?; twofactor.save(&conn)?;
mail::send_token(&twofactor_data.email, &twofactor_data.last_token?)?; mail::send_token(&twofactor_data.email, &twofactor_data.last_token.map_res("Token is empty")?)?;
Ok(()) Ok(())
} }
@@ -159,7 +156,7 @@ fn email(data: JsonUpcase<EmailData>, headers: Headers, conn: DbConn) -> JsonRes
} }
let type_ = TwoFactorType::EmailVerificationChallenge as i32; let type_ = TwoFactorType::EmailVerificationChallenge as i32;
let mut twofactor = TwoFactor::find_by_user_and_type(&user.uuid, type_, &conn)?; let mut twofactor = TwoFactor::find_by_user_and_type(&user.uuid, type_, &conn).map_res("Two factor not found")?;
let mut email_data = EmailTokenData::from_json(&twofactor.data)?; let mut email_data = EmailTokenData::from_json(&twofactor.data)?;
@@ -189,7 +186,7 @@ fn email(data: JsonUpcase<EmailData>, headers: Headers, conn: DbConn) -> JsonRes
/// Validate the email code when used as TwoFactor token mechanism /// Validate the email code when used as TwoFactor token mechanism
pub fn validate_email_code_str(user_uuid: &str, token: &str, data: &str, conn: &DbConn) -> EmptyResult { pub fn validate_email_code_str(user_uuid: &str, token: &str, data: &str, conn: &DbConn) -> EmptyResult {
let mut email_data = EmailTokenData::from_json(&data)?; let mut email_data = EmailTokenData::from_json(&data)?;
let mut twofactor = TwoFactor::find_by_user_and_type(&user_uuid, TwoFactorType::Email as i32, &conn)?; let mut twofactor = TwoFactor::find_by_user_and_type(&user_uuid, TwoFactorType::Email as i32, &conn).map_res("Two factor not found")?;
let issued_token = match &email_data.last_token { let issued_token = match &email_data.last_token {
Some(t) => t, Some(t) => t,
_ => err!("No token available"), _ => err!("No token available"),
@@ -212,7 +209,7 @@ pub fn validate_email_code_str(user_uuid: &str, token: &str, data: &str, conn: &
let date = NaiveDateTime::from_timestamp(email_data.token_sent, 0); let date = NaiveDateTime::from_timestamp(email_data.token_sent, 0);
let max_time = CONFIG.email_expiration_time() as i64; let max_time = CONFIG.email_expiration_time() as i64;
if date.add(Duration::seconds(max_time)) < Utc::now().naive_utc() { if date + Duration::seconds(max_time) < Utc::now().naive_utc() {
err!("Token has expired") err!("Token has expired")
} }

View File

@@ -1,22 +1,23 @@
use data_encoding::BASE32; use data_encoding::BASE32;
use rocket::Route; use rocket::Route;
use rocket_contrib::json::Json; use rocket_contrib::json::Json;
use serde_json;
use serde_json::Value; use serde_json::Value;
use crate::api::{JsonResult, JsonUpcase, NumberOrString, PasswordData}; use crate::{
use crate::auth::Headers; api::{JsonResult, JsonUpcase, NumberOrString, PasswordData},
use crate::crypto; auth::Headers,
use crate::db::{ crypto,
models::{TwoFactor, User}, db::{
DbConn, models::{TwoFactor, User},
DbConn,
},
}; };
pub(crate) mod authenticator; pub mod authenticator;
pub(crate) mod duo; pub mod duo;
pub(crate) mod email; pub mod email;
pub(crate) mod u2f; pub mod u2f;
pub(crate) mod yubikey; pub mod yubikey;
pub fn routes() -> Vec<Route> { pub fn routes() -> Vec<Route> {
let mut routes = routes![ let mut routes = routes![
@@ -37,15 +38,15 @@ pub fn routes() -> Vec<Route> {
} }
#[get("/two-factor")] #[get("/two-factor")]
fn get_twofactor(headers: Headers, conn: DbConn) -> JsonResult { fn get_twofactor(headers: Headers, conn: DbConn) -> Json<Value> {
let twofactors = TwoFactor::find_by_user(&headers.user.uuid, &conn); let twofactors = TwoFactor::find_by_user(&headers.user.uuid, &conn);
let twofactors_json: Vec<Value> = twofactors.iter().map(TwoFactor::to_json_list).collect(); let twofactors_json: Vec<Value> = twofactors.iter().map(TwoFactor::to_json_provider).collect();
Ok(Json(json!({ Json(json!({
"Data": twofactors_json, "Data": twofactors_json,
"Object": "list", "Object": "list",
"ContinuationToken": null, "ContinuationToken": null,
}))) }))
} }
#[post("/two-factor/get-recover", data = "<data>")] #[post("/two-factor/get-recover", data = "<data>")]

View File

@@ -1,21 +1,26 @@
use once_cell::sync::Lazy; use once_cell::sync::Lazy;
use rocket::Route; use rocket::Route;
use rocket_contrib::json::Json; use rocket_contrib::json::Json;
use serde_json;
use serde_json::Value; use serde_json::Value;
use u2f::messages::{RegisterResponse, SignResponse, U2fSignRequest}; use u2f::{
use u2f::protocol::{Challenge, U2f}; messages::{RegisterResponse, SignResponse, U2fSignRequest},
use u2f::register::Registration; protocol::{Challenge, U2f},
register::Registration,
use crate::api::core::two_factor::_generate_recover_code; };
use crate::api::{ApiResult, EmptyResult, JsonResult, JsonUpcase, NumberOrString, PasswordData};
use crate::auth::Headers; use crate::{
use crate::db::{ api::{
models::{TwoFactor, TwoFactorType}, core::two_factor::_generate_recover_code, ApiResult, EmptyResult, JsonResult, JsonUpcase, NumberOrString,
DbConn, PasswordData,
},
auth::Headers,
db::{
models::{TwoFactor, TwoFactorType},
DbConn,
},
error::Error,
CONFIG,
}; };
use crate::error::Error;
use crate::CONFIG;
const U2F_VERSION: &str = "U2F_V2"; const U2F_VERSION: &str = "U2F_V2";
@@ -126,12 +131,12 @@ struct RegisterResponseCopy {
pub error_code: Option<NumberOrString>, pub error_code: Option<NumberOrString>,
} }
impl Into<RegisterResponse> for RegisterResponseCopy { impl From<RegisterResponseCopy> for RegisterResponse {
fn into(self) -> RegisterResponse { fn from(r: RegisterResponseCopy) -> RegisterResponse {
RegisterResponse { RegisterResponse {
registration_data: self.registration_data, registration_data: r.registration_data,
version: self.version, version: r.version,
client_data: self.client_data, client_data: r.client_data,
} }
} }
} }

View File

@@ -1,19 +1,18 @@
use rocket::Route; use rocket::Route;
use rocket_contrib::json::Json; use rocket_contrib::json::Json;
use serde_json;
use serde_json::Value; use serde_json::Value;
use yubico::config::Config; use yubico::{config::Config, verify};
use yubico::verify;
use crate::api::core::two_factor::_generate_recover_code; use crate::{
use crate::api::{EmptyResult, JsonResult, JsonUpcase, PasswordData}; api::{core::two_factor::_generate_recover_code, EmptyResult, JsonResult, JsonUpcase, PasswordData},
use crate::auth::Headers; auth::Headers,
use crate::db::{ db::{
models::{TwoFactor, TwoFactorType}, models::{TwoFactor, TwoFactorType},
DbConn, DbConn,
},
error::{Error, MapResult},
CONFIG,
}; };
use crate::error::{Error, MapResult};
use crate::CONFIG;
pub fn routes() -> Vec<Route> { pub fn routes() -> Vec<Route> {
routes![generate_yubikey, activate_yubikey, activate_yubikey_put,] routes![generate_yubikey, activate_yubikey, activate_yubikey_put,]

View File

@@ -1,50 +1,84 @@
use std::{
collections::HashMap,
fs::{create_dir_all, remove_file, symlink_metadata, File},
io::prelude::*,
net::{IpAddr, ToSocketAddrs},
sync::RwLock,
time::{Duration, SystemTime},
};
use once_cell::sync::Lazy; use once_cell::sync::Lazy;
use std::fs::{create_dir_all, remove_file, symlink_metadata, File};
use std::io::prelude::*;
use std::net::ToSocketAddrs;
use std::time::{Duration, SystemTime};
use rocket::http::ContentType;
use rocket::response::Content;
use rocket::Route;
use reqwest::{Url, header::HeaderMap, blocking::Client, blocking::Response};
use rocket::http::Cookie;
use regex::Regex; use regex::Regex;
use soup::prelude::*; use reqwest::{blocking::Client, blocking::Response, header, Url};
use rocket::{http::ContentType, http::Cookie, response::Content, Route};
use crate::error::Error; use crate::{error::Error, util::Cached, CONFIG};
use crate::CONFIG;
use crate::util::Cached;
pub fn routes() -> Vec<Route> { pub fn routes() -> Vec<Route> {
routes![icon] routes![icon]
} }
const FALLBACK_ICON: &[u8; 344] = include_bytes!("../static/fallback-icon.png");
const ALLOWED_CHARS: &str = "_-."; const ALLOWED_CHARS: &str = "_-.";
static CLIENT: Lazy<Client> = Lazy::new(|| { static CLIENT: Lazy<Client> = Lazy::new(|| {
// Generate the default headers
let mut default_headers = header::HeaderMap::new();
default_headers.insert(header::USER_AGENT, header::HeaderValue::from_static("Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/13.1.1 Safari/605.1.15"));
default_headers.insert(header::ACCEPT_LANGUAGE, header::HeaderValue::from_static("en-US,en;q=0.8"));
default_headers.insert(header::CACHE_CONTROL, header::HeaderValue::from_static("no-cache"));
default_headers.insert(header::PRAGMA, header::HeaderValue::from_static("no-cache"));
default_headers.insert(header::ACCEPT, header::HeaderValue::from_static("text/html,application/xhtml+xml,application/xml; q=0.9,image/webp,image/apng,*/*;q=0.8"));
// Reuse the client between requests // Reuse the client between requests
Client::builder() Client::builder()
.timeout(Duration::from_secs(CONFIG.icon_download_timeout())) .timeout(Duration::from_secs(CONFIG.icon_download_timeout()))
.default_headers(_header_map()) .default_headers(default_headers)
.build() .build()
.unwrap() .unwrap()
}); });
// Build Regex only once since this takes a lot of time.
static ICON_REL_REGEX: Lazy<Regex> = Lazy::new(|| Regex::new(r"(?i)icon$|apple.*icon").unwrap());
static ICON_SIZE_REGEX: Lazy<Regex> = Lazy::new(|| Regex::new(r"(?x)(\d+)\D*(\d+)").unwrap());
// Special HashMap which holds the user-defined Regex, to speed up matching.
static ICON_BLACKLIST_REGEX: Lazy<RwLock<HashMap<String, Regex>>> = Lazy::new(|| RwLock::new(HashMap::new()));
#[get("/<domain>/icon.png")]
fn icon(domain: String) -> Option<Cached<Content<Vec<u8>>>> {
if !is_valid_domain(&domain) {
warn!("Invalid domain: {}", domain);
return None;
}
get_icon(&domain).map(|icon| Cached::ttl(Content(ContentType::new("image", "x-icon"), icon), CONFIG.icon_cache_ttl()))
}
/// Returns if the domain provided is valid or not.
///
/// This does some manual checks and makes use of Url to do some basic checking.
/// domains can't be larger than 63 characters (not counting multiple subdomains) according to the RFCs, but we limit the total size to 255.
fn is_valid_domain(domain: &str) -> bool { fn is_valid_domain(domain: &str) -> bool {
// Don't allow empty or too big domains or path traversal // If parsing the domain fails using Url, it will not work with reqwest.
if domain.is_empty() || domain.len() > 255 || domain.contains("..") { if let Err(parse_error) = Url::parse(format!("https://{}", domain).as_str()) {
debug!("Domain parse error: '{}' - {:?}", domain, parse_error);
return false;
} else if domain.is_empty()
|| domain.contains("..")
|| domain.starts_with('.')
|| domain.starts_with('-')
|| domain.ends_with('-')
{
debug!("Domain validation error: '{}' is either empty, contains '..', starts with an '.', starts or ends with a '-'", domain);
return false;
} else if domain.len() > 255 {
debug!("Domain validation error: '{}' exceeds 255 characters", domain);
return false; return false;
} }
// Only alphanumeric or specific characters
for c in domain.chars() { for c in domain.chars() {
if !c.is_alphanumeric() && !ALLOWED_CHARS.contains(c) { if !c.is_alphanumeric() && !ALLOWED_CHARS.contains(c) {
debug!("Domain validation error: '{}' contains an invalid character '{}'", domain, c);
return false; return false;
} }
} }
@@ -52,25 +86,112 @@ fn is_valid_domain(domain: &str) -> bool {
true true
} }
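Under the checks above, a few hedged examples of what the validation accepts and rejects, written as a hypothetical unit test (no such test exists in this diff):
#[test]
fn is_valid_domain_examples() {
    // Accepted: plain domains made of alphanumerics plus '_', '-' and '.'
    assert!(is_valid_domain("example.com"));
    assert!(is_valid_domain("sub.example-site.com"));
    // Rejected: empty input, '..', leading '.', leading '-', or characters outside the allowed set
    assert!(!is_valid_domain(""));
    assert!(!is_valid_domain("..example.com"));
    assert!(!is_valid_domain(".example.com"));
    assert!(!is_valid_domain("-example.com"));
    assert!(!is_valid_domain("example.com/icon"));
}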
#[get("/<domain>/icon.png")] /// TODO: This is extracted from IpAddr::is_global, which is unstable:
fn icon(domain: String) -> Cached<Content<Vec<u8>>> { /// https://doc.rust-lang.org/nightly/std/net/enum.IpAddr.html#method.is_global
let icon_type = ContentType::new("image", "x-icon"); /// Remove once https://github.com/rust-lang/rust/issues/27709 is merged
#[allow(clippy::nonminimal_bool)]
if !is_valid_domain(&domain) { #[cfg(not(feature = "unstable"))]
warn!("Invalid domain: {:#?}", domain); fn is_global(ip: IpAddr) -> bool {
return Cached::long(Content(icon_type, FALLBACK_ICON.to_vec())); match ip {
IpAddr::V4(ip) => {
// check if this address is 192.0.0.9 or 192.0.0.10. These addresses are the only two
// globally routable addresses in the 192.0.0.0/24 range.
if u32::from(ip) == 0xc0000009 || u32::from(ip) == 0xc000000a {
return true;
}
!ip.is_private()
&& !ip.is_loopback()
&& !ip.is_link_local()
&& !ip.is_broadcast()
&& !ip.is_documentation()
&& !(ip.octets()[0] == 100 && (ip.octets()[1] & 0b1100_0000 == 0b0100_0000))
&& !(ip.octets()[0] == 192 && ip.octets()[1] == 0 && ip.octets()[2] == 0)
&& !(ip.octets()[0] & 240 == 240 && !ip.is_broadcast())
&& !(ip.octets()[0] == 198 && (ip.octets()[1] & 0xfe) == 18)
// Make sure the address is not in 0.0.0.0/8
&& ip.octets()[0] != 0
}
IpAddr::V6(ip) => {
if ip.is_multicast() && ip.segments()[0] & 0x000f == 14 {
true
} else {
!ip.is_multicast()
&& !ip.is_loopback()
&& !((ip.segments()[0] & 0xffc0) == 0xfe80)
&& !((ip.segments()[0] & 0xfe00) == 0xfc00)
&& !ip.is_unspecified()
&& !((ip.segments()[0] == 0x2001) && (ip.segments()[1] == 0xdb8))
}
}
} }
Cached::long(Content(icon_type, get_icon(&domain)))
} }
fn check_icon_domain_is_blacklisted(domain: &str) -> bool { #[cfg(feature = "unstable")]
fn is_global(ip: IpAddr) -> bool {
ip.is_global()
}
/// These are some tests to check that the implementations match
/// The IPv4 addresses can all be checked in 5 minutes or so and they are correct as of nightly 2020-07-11
/// The IPv6 addresses can't be checked in a reasonable time, so we check about ten billion random ones; correct so far
/// Note that the is_global implementation is subject to change as new IP RFCs are created
///
/// To run while showing progress output:
/// cargo test --features sqlite,unstable -- --nocapture --ignored
#[cfg(test)]
#[cfg(feature = "unstable")]
mod tests {
use super::*;
#[test]
#[ignore]
fn test_ipv4_global() {
for a in 0..u8::MAX {
println!("Iter: {}/255", a);
for b in 0..u8::MAX {
for c in 0..u8::MAX {
for d in 0..u8::MAX {
let ip = IpAddr::V4(std::net::Ipv4Addr::new(a, b, c, d));
assert_eq!(ip.is_global(), is_global(ip))
}
}
}
}
}
#[test]
#[ignore]
fn test_ipv6_global() {
use ring::rand::{SecureRandom, SystemRandom};
let mut v = [0u8; 16];
let rand = SystemRandom::new();
for i in 0..1_000 {
println!("Iter: {}/1_000", i);
for _ in 0..10_000_000 {
rand.fill(&mut v).expect("Error generating random values");
let ip = IpAddr::V6(std::net::Ipv6Addr::new(
(v[14] as u16) << 8 | v[15] as u16,
(v[12] as u16) << 8 | v[13] as u16,
(v[10] as u16) << 8 | v[11] as u16,
(v[8] as u16) << 8 | v[9] as u16,
(v[6] as u16) << 8 | v[7] as u16,
(v[4] as u16) << 8 | v[5] as u16,
(v[2] as u16) << 8 | v[3] as u16,
(v[0] as u16) << 8 | v[1] as u16,
));
assert_eq!(ip.is_global(), is_global(ip))
}
}
}
}
fn is_domain_blacklisted(domain: &str) -> bool {
let mut is_blacklisted = CONFIG.icon_blacklist_non_global_ips() let mut is_blacklisted = CONFIG.icon_blacklist_non_global_ips()
&& (domain, 0) && (domain, 0)
.to_socket_addrs() .to_socket_addrs()
.map(|x| { .map(|x| {
for ip_port in x { for ip_port in x {
if !ip_port.ip().is_global() { if !is_global(ip_port.ip()) {
warn!("IP {} for domain '{}' is not a global IP!", ip_port.ip(), domain); warn!("IP {} for domain '{}' is not a global IP!", ip_port.ip(), domain);
return true; return true;
} }
@@ -82,7 +203,31 @@ fn check_icon_domain_is_blacklisted(domain: &str) -> bool {
// Skip the regex check if the previous one is true already // Skip the regex check if the previous one is true already
if !is_blacklisted { if !is_blacklisted {
if let Some(blacklist) = CONFIG.icon_blacklist_regex() { if let Some(blacklist) = CONFIG.icon_blacklist_regex() {
let regex = Regex::new(&blacklist).expect("Valid Regex"); let mut regex_hashmap = ICON_BLACKLIST_REGEX.read().unwrap();
// Use the pre-generated Regex stored in the Lazy HashMap if there is one, else generate it.
let regex = if let Some(regex) = regex_hashmap.get(&blacklist) {
regex
} else {
drop(regex_hashmap);
let mut regex_hashmap_write = ICON_BLACKLIST_REGEX.write().unwrap();
// Clear the current list if the previous key doesn't exist,
// to prevent the HashMap from growing after someone changes the regex via the admin interface.
if regex_hashmap_write.len() >= 1 {
regex_hashmap_write.clear();
}
// Generate the regex and store it in the Lazy Static HashMap.
let blacklist_regex = Regex::new(&blacklist).unwrap();
regex_hashmap_write.insert(blacklist.to_string(), blacklist_regex);
drop(regex_hashmap_write);
regex_hashmap = ICON_BLACKLIST_REGEX.read().unwrap();
regex_hashmap.get(&blacklist).unwrap()
};
// Use the pre-generated Regex stored in the Lazy HashMap.
if regex.is_match(&domain) { if regex.is_match(&domain) {
warn!("Blacklisted domain: {:#?} matched {:#?}", domain, blacklist); warn!("Blacklisted domain: {:#?} matched {:#?}", domain, blacklist);
is_blacklisted = true; is_blacklisted = true;
@@ -93,39 +238,38 @@ fn check_icon_domain_is_blacklisted(domain: &str) -> bool {
is_blacklisted is_blacklisted
} }
fn get_icon(domain: &str) -> Vec<u8> { fn get_icon(domain: &str) -> Option<Vec<u8>> {
let path = format!("{}/{}.png", CONFIG.icon_cache_folder(), domain); let path = format!("{}/{}.png", CONFIG.icon_cache_folder(), domain);
// Check for expiration of negatively cached copy
if icon_is_negcached(&path) {
return None;
}
if let Some(icon) = get_cached_icon(&path) { if let Some(icon) = get_cached_icon(&path) {
return icon; return Some(icon);
} }
if CONFIG.disable_icon_download() { if CONFIG.disable_icon_download() {
return FALLBACK_ICON.to_vec(); return None;
} }
// Get the icon, or fallback in case of error // Get the icon, or None in case of error
match download_icon(&domain) { match download_icon(&domain) {
Ok(icon) => { Ok(icon) => {
save_icon(&path, &icon); save_icon(&path, &icon);
icon Some(icon)
} }
Err(e) => { Err(e) => {
error!("Error downloading icon: {:?}", e); error!("Error downloading icon: {:?}", e);
let miss_indicator = path + ".miss"; let miss_indicator = path + ".miss";
let empty_icon = Vec::new(); save_icon(&miss_indicator, &[]);
save_icon(&miss_indicator, &empty_icon); None
FALLBACK_ICON.to_vec()
} }
} }
} }
fn get_cached_icon(path: &str) -> Option<Vec<u8>> { fn get_cached_icon(path: &str) -> Option<Vec<u8>> {
// Check for expiration of negatively cached copy
if icon_is_negcached(path) {
return Some(FALLBACK_ICON.to_vec());
}
// Check for expiration of successfully cached copy // Check for expiration of successfully cached copy
if icon_is_expired(path) { if icon_is_expired(path) {
return None; return None;
@@ -182,11 +326,55 @@ struct Icon {
} }
impl Icon { impl Icon {
fn new(priority: u8, href: String) -> Self { const fn new(priority: u8, href: String) -> Self {
Self { href, priority } Self { href, priority }
} }
} }
fn get_favicons_node(node: &std::rc::Rc<markup5ever_rcdom::Node>, icons: &mut Vec<Icon>, url: &Url) {
if let markup5ever_rcdom::NodeData::Element { name, attrs, .. } = &node.data {
if name.local.as_ref() == "link" {
let mut has_rel = false;
let mut href = None;
let mut sizes = None;
let attrs = attrs.borrow();
for attr in attrs.iter() {
let attr_name = attr.name.local.as_ref();
let attr_value = attr.value.as_ref();
if attr_name == "rel" && ICON_REL_REGEX.is_match(attr_value) {
has_rel = true;
} else if attr_name == "href" {
href = Some(attr_value);
} else if attr_name == "sizes" {
sizes = Some(attr_value);
}
}
if has_rel {
if let Some(inner_href) = href {
if let Ok(full_href) = url.join(&inner_href).map(|h| h.into_string()) {
let priority = get_icon_priority(&full_href, sizes);
icons.push(Icon::new(priority, full_href));
}
}
}
}
}
// TODO: Might want to limit the recursion depth?
for child in node.children.borrow().iter() {
get_favicons_node(child, icons, url);
}
}
struct IconUrlResult {
iconlist: Vec<Icon>,
cookies: String,
referer: String,
}
/// Returns a Result/Tuple which holds a Vector IconList and a string which holds the cookies from the last response. /// Returns a Result with an IconUrlResult which holds a Vector of Icons plus the cookies and referer from the last response.
/// There will always be a result with a string which will contain https://example.com/favicon.ico and an empty string for the cookies. /// There will always be a result with a string which will contain https://example.com/favicon.ico and an empty string for the cookies.
/// This does not mean that that location exists, but it is the default location browsers use. /// This does not mean that that location exists, but it is the default location browsers use.
@@ -199,24 +387,65 @@ impl Icon {
/// let (mut iconlist, cookie_str) = get_icon_url("github.com")?; /// let (mut iconlist, cookie_str) = get_icon_url("github.com")?;
/// let (mut iconlist, cookie_str) = get_icon_url("gitlab.com")?; /// let (mut iconlist, cookie_str) = get_icon_url("gitlab.com")?;
/// ``` /// ```
fn get_icon_url(domain: &str) -> Result<(Vec<Icon>, String), Error> { fn get_icon_url(domain: &str) -> Result<IconUrlResult, Error> {
// Default URL with secure and insecure schemes // Default URL with secure and insecure schemes
let ssldomain = format!("https://{}", domain); let ssldomain = format!("https://{}", domain);
let httpdomain = format!("http://{}", domain); let httpdomain = format!("http://{}", domain);
// First check the domain as given during the request for both HTTPS and HTTP.
let resp = match get_page(&ssldomain).or_else(|_| get_page(&httpdomain)) {
Ok(c) => Ok(c),
Err(e) => {
let mut sub_resp = Err(e);
// When the domain is not an IP, and has more than one dot, remove all subdomains.
let is_ip = domain.parse::<IpAddr>();
if is_ip.is_err() && domain.matches('.').count() > 1 {
let mut domain_parts = domain.split('.');
let base_domain = format!(
"{base}.{tld}",
tld = domain_parts.next_back().unwrap(),
base = domain_parts.next_back().unwrap()
);
if is_valid_domain(&base_domain) {
let sslbase = format!("https://{}", base_domain);
let httpbase = format!("http://{}", base_domain);
debug!("[get_icon_url]: Trying without subdomains '{}'", base_domain);
sub_resp = get_page(&sslbase).or_else(|_| get_page(&httpbase));
}
// When the domain is not an IP, and has fewer than 2 dots, try to add www. in front of it.
} else if is_ip.is_err() && domain.matches('.').count() < 2 {
let www_domain = format!("www.{}", domain);
if is_valid_domain(&www_domain) {
let sslwww = format!("https://{}", www_domain);
let httpwww = format!("http://{}", www_domain);
debug!("[get_icon_url]: Trying with www. prefix '{}'", www_domain);
sub_resp = get_page(&sslwww).or_else(|_| get_page(&httpwww));
}
}
sub_resp
}
};
// Create the iconlist // Create the iconlist
let mut iconlist: Vec<Icon> = Vec::new(); let mut iconlist: Vec<Icon> = Vec::new();
// Create the cookie_str to hold all the cookies from the response // Create the cookie_str to hold all the cookies from the response
// These cookies can be used to request/download the favicon image. // These cookies can be used to request/download the favicon image.
// Some sites have extra security in place with for example XSRF Tokens. // Some sites have extra security in place with for example XSRF Tokens.
let mut cookie_str = String::new(); let mut cookie_str = "".to_string();
let mut referer = "".to_string();
let resp = get_page(&ssldomain).or_else(|_| get_page(&httpdomain)); if let Ok(content) = resp {
if let Ok(mut content) = resp {
// Extract the URL from the response in case redirects occurred (like @ gitlab.com) // Extract the URL from the response in case redirects occurred (like @ gitlab.com)
let url = content.url().clone(); let url = content.url().clone();
// Get all the cookies and pass them on to the next function.
// Needed for XSRF Cookies for example (like @ mijn.ing.nl)
let raw_cookies = content.headers().get_all("set-cookie"); let raw_cookies = content.headers().get_all("set-cookie");
cookie_str = raw_cookies cookie_str = raw_cookies
.iter() .iter()
@@ -230,31 +459,23 @@ fn get_icon_url(domain: &str) -> Result<(Vec<Icon>, String), Error> {
}) })
.collect::<String>(); .collect::<String>();
// Set the referer to be used on the final request; some sites check this.
// Mostly used to prevent direct linking and for other security reasons.
referer = url.as_str().to_string();
// Add the default favicon.ico to the list with the domain the content responded from. // Add the default favicon.ico to the list with the domain the content responded from.
iconlist.push(Icon::new(35, url.join("/favicon.ico").unwrap().into_string())); iconlist.push(Icon::new(35, url.join("/favicon.ico").unwrap().into_string()));
// 512KB should be more than enough for the HTML, though as we only really need // 512KB should be more than enough for the HTML, though as we only really need
// the HTML header, it could potentially be reduced even further // the HTML header, it could potentially be reduced even further
let limited_reader = crate::util::LimitedReader::new(&mut content, 512 * 1024); let mut limited_reader = content.take(512 * 1024);
let soup = Soup::from_reader(limited_reader)?; use html5ever::tendril::TendrilSink;
// Search for and filter let dom = html5ever::parse_document(markup5ever_rcdom::RcDom::default(), Default::default())
let favicons = soup .from_utf8()
.tag("link") .read_from(&mut limited_reader)?;
.attr("rel", Regex::new(r"icon$|apple.*icon")?) // Only use icon rels
.attr("href", Regex::new(r"(?i)\w+\.(jpg|jpeg|png|ico)(\?.*)?$|^data:image.*base64")?) // Only allow specific extensions
.find_all();
// Loop through all the found icons and determine it's priority get_favicons_node(&dom.document, &mut iconlist, &url);
for favicon in favicons {
let sizes = favicon.get("sizes");
let href = favicon.get("href").expect("Missing href");
let full_href = url.join(&href).unwrap().into_string();
let priority = get_icon_priority(&full_href, sizes);
iconlist.push(Icon::new(priority, full_href))
}
} else { } else {
// Add the default favicon.ico to the list with just the given domain // Add the default favicon.ico to the list with just the given domain
iconlist.push(Icon::new(35, format!("{}/favicon.ico", ssldomain))); iconlist.push(Icon::new(35, format!("{}/favicon.ico", ssldomain)));
@@ -265,28 +486,33 @@ fn get_icon_url(domain: &str) -> Result<(Vec<Icon>, String), Error> {
iconlist.sort_by_key(|x| x.priority); iconlist.sort_by_key(|x| x.priority);
// There always is an icon in the list, so no need to check if it exists, and just return the first one // There always is an icon in the list, so no need to check if it exists, and just return the first one
Ok((iconlist, cookie_str)) Ok(IconUrlResult{
iconlist,
cookies: cookie_str,
referer
})
} }
fn get_page(url: &str) -> Result<Response, Error> { fn get_page(url: &str) -> Result<Response, Error> {
get_page_with_cookies(url, "") get_page_with_cookies(url, "", "")
} }
fn get_page_with_cookies(url: &str, cookie_str: &str) -> Result<Response, Error> { fn get_page_with_cookies(url: &str, cookie_str: &str, referer: &str) -> Result<Response, Error> {
if check_icon_domain_is_blacklisted(Url::parse(url).unwrap().host_str().unwrap_or_default()) { if is_domain_blacklisted(Url::parse(url).unwrap().host_str().unwrap_or_default()) {
err!("Favicon rel linked to a non blacklisted domain!"); err!("Favicon rel linked to a blacklisted domain!");
} }
if cookie_str.is_empty() { let mut client = CLIENT.get(url);
CLIENT.get(url).send()?.error_for_status().map_err(Into::into) if !cookie_str.is_empty() {
} else { client = client.header("Cookie", cookie_str)
CLIENT
.get(url)
.header("cookie", cookie_str)
.send()?
.error_for_status()
.map_err(Into::into)
} }
if !referer.is_empty() {
client = client.header("Referer", referer)
}
client.send()?
.error_for_status()
.map_err(Into::into)
} }
/// Returns an Integer with the priority of the icon type to prefer. /// Returns an Integer with the priority of the icon type to prefer.
@@ -301,7 +527,7 @@ fn get_page_with_cookies(url: &str, cookie_str: &str) -> Result<Response, Error>
/// priority1 = get_icon_priority("http://example.com/path/to/a/favicon.png", "32x32"); /// priority1 = get_icon_priority("http://example.com/path/to/a/favicon.png", "32x32");
/// priority2 = get_icon_priority("https://example.com/path/to/a/favicon.ico", ""); /// priority2 = get_icon_priority("https://example.com/path/to/a/favicon.ico", "");
/// ``` /// ```
fn get_icon_priority(href: &str, sizes: Option<String>) -> u8 { fn get_icon_priority(href: &str, sizes: Option<&str>) -> u8 {
// Check if there is a dimension set // Check if there is a dimension set
let (width, height) = parse_sizes(sizes); let (width, height) = parse_sizes(sizes);
@@ -314,7 +540,7 @@ fn get_icon_priority(href: &str, sizes: Option<String>) -> u8 {
1 1
} else if width == 64 { } else if width == 64 {
2 2
} else if width >= 24 && width <= 128 { } else if (24..=128).contains(&width) {
3 3
} else if width == 16 { } else if width == 16 {
4 4
@@ -349,12 +575,12 @@ fn get_icon_priority(href: &str, sizes: Option<String>) -> u8 {
/// let (width, height) = parse_sizes("x128x128"); // (128, 128) /// let (width, height) = parse_sizes("x128x128"); // (128, 128)
/// let (width, height) = parse_sizes("32"); // (0, 0) /// let (width, height) = parse_sizes("32"); // (0, 0)
/// ``` /// ```
fn parse_sizes(sizes: Option<String>) -> (u16, u16) { fn parse_sizes(sizes: Option<&str>) -> (u16, u16) {
let mut width: u16 = 0; let mut width: u16 = 0;
let mut height: u16 = 0; let mut height: u16 = 0;
if let Some(sizes) = sizes { if let Some(sizes) = sizes {
match Regex::new(r"(?x)(\d+)\D*(\d+)").unwrap().captures(sizes.trim()) { match ICON_SIZE_REGEX.captures(sizes.trim()) {
None => {} None => {}
Some(dimensions) => { Some(dimensions) => {
if dimensions.len() >= 3 { if dimensions.len() >= 3 {
@@ -369,17 +595,17 @@ fn parse_sizes(sizes: Option<String>) -> (u16, u16) {
} }
fn download_icon(domain: &str) -> Result<Vec<u8>, Error> { fn download_icon(domain: &str) -> Result<Vec<u8>, Error> {
if check_icon_domain_is_blacklisted(domain) { if is_domain_blacklisted(domain) {
err!("Domain is blacklisted", domain) err!("Domain is blacklisted", domain)
} }
let (iconlist, cookie_str) = get_icon_url(&domain)?; let icon_result = get_icon_url(&domain)?;
let mut buffer = Vec::new(); let mut buffer = Vec::new();
use data_url::DataUrl; use data_url::DataUrl;
for icon in iconlist.iter().take(5) { for icon in icon_result.iconlist.iter().take(5) {
if icon.href.starts_with("data:image") { if icon.href.starts_with("data:image") {
let datauri = DataUrl::process(&icon.href).unwrap(); let datauri = DataUrl::process(&icon.href).unwrap();
// Check if we are able to decode the data uri // Check if we are able to decode the data uri
@@ -394,13 +620,13 @@ fn download_icon(domain: &str) -> Result<Vec<u8>, Error> {
_ => warn!("data uri is invalid"), _ => warn!("data uri is invalid"),
}; };
} else { } else {
match get_page_with_cookies(&icon.href, &cookie_str) { match get_page_with_cookies(&icon.href, &icon_result.cookies, &icon_result.referer) {
Ok(mut res) => { Ok(mut res) => {
info!("Downloaded icon from {}", icon.href); info!("Downloaded icon from {}", icon.href);
res.copy_to(&mut buffer)?; res.copy_to(&mut buffer)?;
break; break;
} },
Err(_) => info!("Download failed for {}", icon.href), _ => warn!("Download failed for {}", icon.href),
}; };
} }
} }
@@ -425,25 +651,3 @@ fn save_icon(path: &str, icon: &[u8]) {
} }
} }
} }
fn _header_map() -> HeaderMap {
// Set some default headers for the request.
// Use a browser like user-agent to make sure most websites will return there correct website.
use reqwest::header::*;
macro_rules! headers {
($( $name:ident : $value:literal),+ $(,)? ) => {
let mut headers = HeaderMap::new();
$( headers.insert($name, HeaderValue::from_static($value)); )+
headers
};
}
headers! {
USER_AGENT: "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36 Edge/16.16299",
ACCEPT_LANGUAGE: "en-US,en;q=0.8",
CACHE_CONTROL: "no-cache",
PRAGMA: "no-cache",
ACCEPT: "text/html,application/xhtml+xml,application/xml; q=0.9,image/webp,image/apng,*/*;q=0.8",
}
}

View File

@@ -1,19 +1,22 @@
use chrono::Utc; use chrono::Local;
use num_traits::FromPrimitive; use num_traits::FromPrimitive;
use rocket::request::{Form, FormItems, FromForm}; use rocket::{
use rocket::Route; request::{Form, FormItems, FromForm},
Route,
};
use rocket_contrib::json::Json; use rocket_contrib::json::Json;
use serde_json::Value; use serde_json::Value;
use crate::api::core::two_factor::email::EmailTokenData; use crate::{
use crate::api::core::two_factor::{duo, email, yubikey}; api::{
use crate::api::{ApiResult, EmptyResult, JsonResult}; core::two_factor::{duo, email, email::EmailTokenData, yubikey},
use crate::auth::ClientIp; ApiResult, EmptyResult, JsonResult,
use crate::db::models::*; },
use crate::db::DbConn; auth::ClientIp,
use crate::mail; db::{models::*, DbConn},
use crate::util; error::MapResult,
use crate::CONFIG; mail, util, CONFIG,
};
pub fn routes() -> Vec<Route> { pub fn routes() -> Vec<Route> {
routes![login] routes![login]
@@ -38,7 +41,7 @@ fn login(data: Form<ConnectData>, conn: DbConn, ip: ClientIp) -> JsonResult {
_check_is_some(&data.device_name, "device_name cannot be blank")?; _check_is_some(&data.device_name, "device_name cannot be blank")?;
_check_is_some(&data.device_type, "device_type cannot be blank")?; _check_is_some(&data.device_type, "device_type cannot be blank")?;
_password_login(data, conn, ip) _password_login(data, conn, &ip)
} }
t => err!("Invalid type", t), t => err!("Invalid type", t),
} }
@@ -49,10 +52,7 @@ fn _refresh_login(data: ConnectData, conn: DbConn) -> JsonResult {
let token = data.refresh_token.unwrap(); let token = data.refresh_token.unwrap();
// Get device by refresh token // Get device by refresh token
let mut device = match Device::find_by_refresh_token(&token, &conn) { let mut device = Device::find_by_refresh_token(&token, &conn).map_res("Invalid refresh token")?;
Some(device) => device,
None => err!("Invalid refresh token"),
};
// COMMON // COMMON
let user = User::find_by_uuid(&device.user_uuid, &conn).unwrap(); let user = User::find_by_uuid(&device.user_uuid, &conn).unwrap();
@@ -68,10 +68,15 @@ fn _refresh_login(data: ConnectData, conn: DbConn) -> JsonResult {
"refresh_token": device.refresh_token, "refresh_token": device.refresh_token,
"Key": user.akey, "Key": user.akey,
"PrivateKey": user.private_key, "PrivateKey": user.private_key,
"Kdf": user.client_kdf_type,
"KdfIterations": user.client_kdf_iter,
"ResetMasterPassword": false, // TODO: according to official server seems something like: user.password_hash.is_empty(), but would need testing
"scope": "api offline_access"
}))) })))
} }
fn _password_login(data: ConnectData, conn: DbConn, ip: ClientIp) -> JsonResult { fn _password_login(data: ConnectData, conn: DbConn, ip: &ClientIp) -> JsonResult {
// Validate scope // Validate scope
let scope = data.scope.as_ref().unwrap(); let scope = data.scope.as_ref().unwrap();
if scope != "api offline_access" { if scope != "api offline_access" {
@@ -97,8 +102,18 @@ fn _password_login(data: ConnectData, conn: DbConn, ip: ClientIp) -> JsonResult
) )
} }
// Check if the user is disabled
if !user.enabled {
err!(
"This user has been disabled",
format!("IP: {}. Username: {}.", ip.ip, username)
)
}
let now = Local::now();
if user.verified_at.is_none() && CONFIG.mail_enabled() && CONFIG.signups_verify() { if user.verified_at.is_none() && CONFIG.mail_enabled() && CONFIG.signups_verify() {
let now = Utc::now().naive_utc(); let now = now.naive_utc();
if user.last_verifying_at.is_none() || now.signed_duration_since(user.last_verifying_at.unwrap()).num_seconds() > CONFIG.signups_verify_resend_time() as i64 { if user.last_verifying_at.is_none() || now.signed_duration_since(user.last_verifying_at.unwrap()).num_seconds() > CONFIG.signups_verify_resend_time() as i64 {
let resend_limit = CONFIG.signups_verify_resend_limit() as i32; let resend_limit = CONFIG.signups_verify_resend_limit() as i32;
if resend_limit == 0 || user.login_verify_count < resend_limit { if resend_limit == 0 || user.login_verify_count < resend_limit {
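The verification block now takes its timestamp from the `let now = Local::now();` introduced above and converts it with `naive_utc()`, instead of calling `Utc::now().naive_utc()` inline; the same `now` is later reused for the new-device email. Both forms yield the same naive UTC value, as this standalone check shows:

use chrono::{Local, Utc};

fn main() {
    let from_local = Local::now().naive_utc();
    let from_utc = Utc::now().naive_utc();
    // Both are the current instant expressed as a NaiveDateTime in UTC.
    assert!((from_utc - from_local).num_seconds().abs() < 1);
}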
@@ -127,10 +142,10 @@ fn _password_login(data: ConnectData, conn: DbConn, ip: ClientIp) -> JsonResult
let (mut device, new_device) = get_device(&data, &conn, &user); let (mut device, new_device) = get_device(&data, &conn, &user);
let twofactor_token = twofactor_auth(&user.uuid, &data, &mut device, &conn)?; let twofactor_token = twofactor_auth(&user.uuid, &data, &mut device, &ip, &conn)?;
if CONFIG.mail_enabled() && new_device { if CONFIG.mail_enabled() && new_device {
if let Err(e) = mail::send_new_device_logged_in(&user.email, &ip.ip.to_string(), &device.updated_at, &device.name) { if let Err(e) = mail::send_new_device_logged_in(&user.email, &ip.ip.to_string(), &now, &device.name) {
error!("Error sending new device email: {:#?}", e); error!("Error sending new device email: {:#?}", e);
if CONFIG.require_device_email() { if CONFIG.require_device_email() {
@@ -140,7 +155,6 @@ fn _password_login(data: ConnectData, conn: DbConn, ip: ClientIp) -> JsonResult
} }
// Common // Common
let user = User::find_by_uuid(&device.user_uuid, &conn).unwrap();
let orgs = UserOrganization::find_by_user(&user.uuid, &conn); let orgs = UserOrganization::find_by_user(&user.uuid, &conn);
let (access_token, expires_in) = device.refresh_tokens(&user, orgs); let (access_token, expires_in) = device.refresh_tokens(&user, orgs);
@@ -154,6 +168,11 @@ fn _password_login(data: ConnectData, conn: DbConn, ip: ClientIp) -> JsonResult
"Key": user.akey, "Key": user.akey,
"PrivateKey": user.private_key, "PrivateKey": user.private_key,
//"TwoFactorToken": "11122233333444555666777888999" //"TwoFactorToken": "11122233333444555666777888999"
"Kdf": user.client_kdf_type,
"KdfIterations": user.client_kdf_iter,
"ResetMasterPassword": false,// TODO: Same as above
"scope": "api offline_access"
}); });
if let Some(token) = twofactor_token { if let Some(token) = twofactor_token {
@@ -197,6 +216,7 @@ fn twofactor_auth(
user_uuid: &str, user_uuid: &str,
data: &ConnectData, data: &ConnectData,
device: &mut Device, device: &mut Device,
ip: &ClientIp,
conn: &DbConn, conn: &DbConn,
) -> ApiResult<Option<String>> { ) -> ApiResult<Option<String>> {
let twofactors = TwoFactor::find_by_user(user_uuid, conn); let twofactors = TwoFactor::find_by_user(user_uuid, conn);
@@ -216,8 +236,7 @@ fn twofactor_auth(
let selected_twofactor = twofactors let selected_twofactor = twofactors
.into_iter() .into_iter()
.filter(|tf| tf.atype == selected_id && tf.enabled) .find(|tf| tf.atype == selected_id && tf.enabled);
.nth(0);
use crate::api::core::two_factor as _tf; use crate::api::core::two_factor as _tf;
use crate::crypto::ct_eq; use crate::crypto::ct_eq;
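The two-factor selection above is a Clippy-suggested simplification: `.filter(pred).nth(0)` on an iterator is equivalent to `.find(pred)`. A tiny standalone illustration:

fn main() {
    let numbers = [1, 2, 3, 4];
    // Old pattern: filter, then take the first match.
    let old_way = numbers.iter().filter(|n| **n % 2 == 0).nth(0);
    // New pattern: find the first match directly.
    let new_way = numbers.iter().find(|n| **n % 2 == 0);
    assert_eq!(old_way, new_way);
}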
@@ -226,7 +245,7 @@ fn twofactor_auth(
let mut remember = data.two_factor_remember.unwrap_or(0); let mut remember = data.two_factor_remember.unwrap_or(0);
match TwoFactorType::from_i32(selected_id) { match TwoFactorType::from_i32(selected_id) {
Some(TwoFactorType::Authenticator) => _tf::authenticator::validate_totp_code_str(user_uuid, twofactor_code, &selected_data?, conn)?, Some(TwoFactorType::Authenticator) => _tf::authenticator::validate_totp_code_str(user_uuid, twofactor_code, &selected_data?, ip, conn)?,
Some(TwoFactorType::U2f) => _tf::u2f::validate_u2f_login(user_uuid, twofactor_code, conn)?, Some(TwoFactorType::U2f) => _tf::u2f::validate_u2f_login(user_uuid, twofactor_code, conn)?,
Some(TwoFactorType::YubiKey) => _tf::yubikey::validate_yubikey_login(twofactor_code, &selected_data?)?, Some(TwoFactorType::YubiKey) => _tf::yubikey::validate_yubikey_login(twofactor_code, &selected_data?)?,
Some(TwoFactorType::Duo) => _tf::duo::validate_duo_login(data.username.as_ref().unwrap(), twofactor_code, conn)?, Some(TwoFactorType::Duo) => _tf::duo::validate_duo_login(data.username.as_ref().unwrap(), twofactor_code, conn)?,
@@ -252,10 +271,7 @@ fn twofactor_auth(
} }
fn _selected_data(tf: Option<TwoFactor>) -> ApiResult<String> { fn _selected_data(tf: Option<TwoFactor>) -> ApiResult<String> {
match tf { tf.map(|t| t.data).map_res("Two factor doesn't exist")
Some(tf) => Ok(tf.data),
None => err!("Two factor doesn't exist"),
}
} }
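Both this helper and the refresh-token lookup earlier in the file now go through `map_res`, imported from `crate::error::MapResult`. The actual trait is defined in error.rs and may differ; roughly, it is an extension that turns an `Option` (or `Result`) into the crate's error type with a given message. An illustrative sketch of the idea, using a plain `String` error rather than the repository's `Error` type:

// Illustrative only: an Option-to-Result extension trait in the spirit of MapResult.
trait MapResult<T> {
    fn map_res(self, msg: &str) -> Result<T, String>;
}

impl<T> MapResult<T> for Option<T> {
    fn map_res(self, msg: &str) -> Result<T, String> {
        self.ok_or_else(|| msg.to_string())
    }
}

fn main() {
    let missing: Option<i32> = None;
    assert_eq!(missing.map_res("Two factor doesn't exist"), Err("Two factor doesn't exist".to_string()));
}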
fn _json_err_twofactor(providers: &[i32], user_uuid: &str, conn: &DbConn) -> ApiResult<Value> { fn _json_err_twofactor(providers: &[i32], user_uuid: &str, conn: &DbConn) -> ApiResult<Value> {
@@ -347,6 +363,7 @@ fn _json_err_twofactor(providers: &[i32], user_uuid: &str, conn: &DbConn) -> Api
Ok(result) Ok(result)
} }
// https://github.com/bitwarden/mobile/blob/master/src/Core/Models/Request/TokenRequest.cs
#[derive(Debug, Clone, Default)] #[derive(Debug, Clone, Default)]
#[allow(non_snake_case)] #[allow(non_snake_case)]
struct ConnectData { struct ConnectData {
@@ -364,6 +381,7 @@ struct ConnectData {
device_identifier: Option<String>, device_identifier: Option<String>,
device_name: Option<String>, device_name: Option<String>,
device_type: Option<String>, device_type: Option<String>,
device_push_token: Option<String>, // Unused; mobile device push not yet supported.
// Needed for two-factor auth // Needed for two-factor auth
two_factor_provider: Option<i32>, two_factor_provider: Option<i32>,
@@ -391,6 +409,7 @@ impl<'f> FromForm<'f> for ConnectData {
"deviceidentifier" => form.device_identifier = Some(value), "deviceidentifier" => form.device_identifier = Some(value),
"devicename" => form.device_name = Some(value), "devicename" => form.device_name = Some(value),
"devicetype" => form.device_type = Some(value), "devicetype" => form.device_type = Some(value),
"devicepushtoken" => form.device_push_token = Some(value),
"twofactorprovider" => form.two_factor_provider = value.parse().ok(), "twofactorprovider" => form.two_factor_provider = value.parse().ok(),
"twofactortoken" => form.two_factor_token = Some(value), "twofactortoken" => form.two_factor_token = Some(value),
"twofactorremember" => form.two_factor_remember = value.parse().ok(), "twofactorremember" => form.two_factor_remember = value.parse().ok(),

View File

@@ -1,27 +1,30 @@
mod admin; mod admin;
pub(crate) mod core; pub mod core;
mod icons; mod icons;
mod identity; mod identity;
mod notifications; mod notifications;
mod web; mod web;
pub use self::admin::routes as admin_routes;
pub use self::core::routes as core_routes;
pub use self::icons::routes as icons_routes;
pub use self::identity::routes as identity_routes;
pub use self::notifications::routes as notifications_routes;
pub use self::notifications::{start_notification_server, Notify, UpdateType};
pub use self::web::routes as web_routes;
use rocket_contrib::json::Json; use rocket_contrib::json::Json;
use serde_json::Value; use serde_json::Value;
pub use crate::api::{
admin::routes as admin_routes,
core::routes as core_routes,
core::start_send_deletion_scheduler,
icons::routes as icons_routes,
identity::routes as identity_routes,
notifications::routes as notifications_routes,
notifications::{start_notification_server, Notify, UpdateType},
web::routes as web_routes,
};
use crate::util;
// Type aliases for API methods results // Type aliases for API methods results
type ApiResult<T> = Result<T, crate::error::Error>; type ApiResult<T> = Result<T, crate::error::Error>;
pub type JsonResult = ApiResult<Json<Value>>; pub type JsonResult = ApiResult<Json<Value>>;
pub type EmptyResult = ApiResult<()>; pub type EmptyResult = ApiResult<()>;
use crate::util;
type JsonUpcase<T> = Json<util::UpCase<T>>; type JsonUpcase<T> = Json<util::UpCase<T>>;
type JsonUpcaseVec<T> = Json<Vec<util::UpCase<T>>>; type JsonUpcaseVec<T> = Json<Vec<util::UpCase<T>>>;

View File

@@ -4,11 +4,12 @@ use rocket::Route;
use rocket_contrib::json::Json; use rocket_contrib::json::Json;
use serde_json::Value as JsonValue; use serde_json::Value as JsonValue;
use crate::api::{EmptyResult, JsonResult}; use crate::{
use crate::auth::Headers; api::EmptyResult,
use crate::db::DbConn; auth::Headers,
db::DbConn,
use crate::{Error, CONFIG}; Error, CONFIG,
};
pub fn routes() -> Vec<Route> { pub fn routes() -> Vec<Route> {
routes![negotiate, websockets_err] routes![negotiate, websockets_err]
@@ -18,18 +19,19 @@ static SHOW_WEBSOCKETS_MSG: AtomicBool = AtomicBool::new(true);
#[get("/hub")] #[get("/hub")]
fn websockets_err() -> EmptyResult { fn websockets_err() -> EmptyResult {
if CONFIG.websocket_enabled() && SHOW_WEBSOCKETS_MSG.compare_and_swap(true, false, Ordering::Relaxed) { if CONFIG.websocket_enabled() && SHOW_WEBSOCKETS_MSG.compare_exchange(true, false, Ordering::Relaxed, Ordering::Relaxed).is_ok() {
err!("########################################################### err!("
###########################################################
'/notifications/hub' should be proxied to the websocket server or notifications won't work. '/notifications/hub' should be proxied to the websocket server or notifications won't work.
Go to the Wiki for more info, or disable WebSockets setting WEBSOCKET_ENABLED=false. Go to the Wiki for more info, or disable WebSockets setting WEBSOCKET_ENABLED=false.
###########################################################################################") ###########################################################################################\n")
} else { } else {
Err(Error::empty()) Err(Error::empty())
} }
} }
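`AtomicBool::compare_and_swap` was deprecated in Rust 1.50; its replacement here, `compare_exchange`, takes separate success/failure orderings and returns a `Result`, so `.is_ok()` reproduces the old "did I win the swap" check. A standalone illustration of the pattern:

use std::sync::atomic::{AtomicBool, Ordering};

static SHOW_MSG: AtomicBool = AtomicBool::new(true);

// Returns true only for the first caller that flips the flag from true to false.
fn should_show_message() -> bool {
    SHOW_MSG.compare_exchange(true, false, Ordering::Relaxed, Ordering::Relaxed).is_ok()
}

fn main() {
    assert!(should_show_message());
    assert!(!should_show_message());
}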
#[post("/hub/negotiate")] #[post("/hub/negotiate")]
fn negotiate(_headers: Headers, _conn: DbConn) -> JsonResult { fn negotiate(_headers: Headers, _conn: DbConn) -> Json<JsonValue> {
use crate::crypto; use crate::crypto;
use data_encoding::BASE64URL; use data_encoding::BASE64URL;
@@ -45,10 +47,10 @@ fn negotiate(_headers: Headers, _conn: DbConn) -> JsonResult {
// Rocket SSE support: https://github.com/SergioBenitez/Rocket/issues/33 // Rocket SSE support: https://github.com/SergioBenitez/Rocket/issues/33
// {"transport":"ServerSentEvents", "transferFormats":["Text"]}, // {"transport":"ServerSentEvents", "transferFormats":["Text"]},
// {"transport":"LongPolling", "transferFormats":["Text","Binary"]} // {"transport":"LongPolling", "transferFormats":["Text","Binary"]}
Ok(Json(json!({ Json(json!({
"connectionId": conn_id, "connectionId": conn_id,
"availableTransports": available_transports "availableTransports": available_transports
}))) }))
} }
// //
@@ -118,7 +120,7 @@ fn convert_option<T: Into<Value>>(option: Option<T>) -> Value {
} }
// Server WebSocket handler // Server WebSocket handler
pub struct WSHandler { pub struct WsHandler {
out: Sender, out: Sender,
user_uuid: Option<String>, user_uuid: Option<String>,
users: WebSocketUsers, users: WebSocketUsers,
@@ -136,10 +138,9 @@ struct InitialMessage {
const PING_MS: u64 = 15_000; const PING_MS: u64 = 15_000;
const PING: Token = Token(1); const PING: Token = Token(1);
const ID_KEY: &str = "id=";
const ACCESS_TOKEN_KEY: &str = "access_token="; const ACCESS_TOKEN_KEY: &str = "access_token=";
impl WSHandler { impl WsHandler {
fn err(&self, msg: &'static str) -> ws::Result<()> { fn err(&self, msg: &'static str) -> ws::Result<()> {
self.out.close(ws::CloseCode::Invalid)?; self.out.close(ws::CloseCode::Invalid)?;
@@ -147,38 +148,50 @@ impl WSHandler {
let io_error = io::Error::from(io::ErrorKind::InvalidData); let io_error = io::Error::from(io::ErrorKind::InvalidData);
Err(ws::Error::new(ws::ErrorKind::Io(io_error), msg)) Err(ws::Error::new(ws::ErrorKind::Io(io_error), msg))
} }
}
impl Handler for WSHandler { fn get_request_token(&self, hs: Handshake) -> Option<String> {
fn on_open(&mut self, hs: Handshake) -> ws::Result<()> { use std::str::from_utf8;
// Path == "/notifications/hub?id=<id>==&access_token=<access_token>"
let path = hs.request.resource();
let (_id, access_token) = match path.split('?').nth(1) { // Verify we have a token header
Some(params) => { if let Some(header_value) = hs.request.header("Authorization") {
let mut params_iter = params.split('&').take(2); if let Ok(converted) = from_utf8(header_value) {
if let Some(token_part) = converted.split("Bearer ").nth(1) {
let mut id = None; return Some(token_part.into());
let mut access_token = None;
while let Some(val) = params_iter.next() {
if val.starts_with(ID_KEY) {
id = Some(&val[ID_KEY.len()..]);
} else if val.starts_with(ACCESS_TOKEN_KEY) {
access_token = Some(&val[ACCESS_TOKEN_KEY.len()..]);
}
}
match (id, access_token) {
(Some(a), Some(b)) => (a, b),
_ => return self.err("Missing id or access token"),
} }
} }
None => return self.err("Missing query path"), };
// Otherwise verify the query parameter value
let path = hs.request.resource();
if let Some(params) = path.split('?').nth(1) {
let params_iter = params.split('&').take(1);
for val in params_iter {
if let Some(stripped) = val.strip_prefix(ACCESS_TOKEN_KEY) {
return Some(stripped.into());
}
}
};
None
}
}
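The query-string branch of `get_request_token` uses `str::strip_prefix` (stable since Rust 1.45) instead of the manual `&val[KEY.len()..]` slicing the old handler did, which is what Clippy's `manual_strip` lint recommends. For example:

fn main() {
    let param = "access_token=abc123";
    // strip_prefix returns Some(rest) only when the prefix actually matches.
    if let Some(token) = param.strip_prefix("access_token=") {
        assert_eq!(token, "abc123");
    }
    assert_eq!("id=42".strip_prefix("access_token="), None);
}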
impl Handler for WsHandler {
fn on_open(&mut self, hs: Handshake) -> ws::Result<()> {
// Path == "/notifications/hub?id=<id>==&access_token=<access_token>"
//
// We don't use `id`, and as of around 2020-03-25, the official clients
// no longer seem to pass `id` (only `access_token`).
// Get user token from header or query parameter
let access_token = match self.get_request_token(hs) {
Some(token) => token,
_ => return self.err("Missing access token"),
}; };
// Validate the user // Validate the user
use crate::auth; use crate::auth;
let claims = match auth::decode_login(access_token) { let claims = match auth::decode_login(access_token.as_str()) {
Ok(claims) => claims, Ok(claims) => claims,
Err(_) => return self.err("Invalid access token provided"), Err(_) => return self.err("Invalid access token provided"),
}; };
@@ -227,13 +240,13 @@ impl Handler for WSHandler {
} }
} }
struct WSFactory { struct WsFactory {
pub users: WebSocketUsers, pub users: WebSocketUsers,
} }
impl WSFactory { impl WsFactory {
pub fn init() -> Self { pub fn init() -> Self {
WSFactory { WsFactory {
users: WebSocketUsers { users: WebSocketUsers {
map: Arc::new(CHashMap::new()), map: Arc::new(CHashMap::new()),
}, },
@@ -241,11 +254,11 @@ impl WSFactory {
} }
} }
impl Factory for WSFactory { impl Factory for WsFactory {
type Handler = WSHandler; type Handler = WsHandler;
fn connection_made(&mut self, out: Sender) -> Self::Handler { fn connection_made(&mut self, out: Sender) -> Self::Handler {
WSHandler { WsHandler {
out, out,
user_uuid: None, user_uuid: None,
users: self.users.clone(), users: self.users.clone(),
@@ -256,7 +269,9 @@ impl Factory for WSFactory {
// Remove handler // Remove handler
if let Some(user_uuid) = &handler.user_uuid { if let Some(user_uuid) = &handler.user_uuid {
if let Some(mut user_conn) = self.users.map.get_mut(user_uuid) { if let Some(mut user_conn) = self.users.map.get_mut(user_uuid) {
user_conn.remove_item(&handler.out); if let Some(pos) = user_conn.iter().position(|x| x == &handler.out) {
user_conn.remove(pos);
}
} }
} }
} }
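`Vec::remove_item` was an unstable API that never reached stable Rust and was later dropped from the standard library, so the connection cleanup now finds the index explicitly and calls `remove`. The same pattern in isolation:

fn main() {
    let mut senders = vec!["conn-a", "conn-b", "conn-c"];
    // Stable replacement for the old remove_item(&"conn-b") call.
    if let Some(pos) = senders.iter().position(|x| *x == "conn-b") {
        senders.remove(pos);
    }
    assert_eq!(senders, vec!["conn-a", "conn-c"]);
}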
@@ -327,7 +342,7 @@ impl WebSocketUsers {
/* Message Structure /* Message Structure
[ [
1, // MessageType.Invocation 1, // MessageType.Invocation
{}, // Headers {}, // Headers (map)
null, // InvocationId null, // InvocationId
"ReceiveMessage", // Target "ReceiveMessage", // Target
[ // Arguments [ // Arguments
@@ -344,7 +359,7 @@ fn create_update(payload: Vec<(Value, Value)>, ut: UpdateType) -> Vec<u8> {
let value = V::Array(vec![ let value = V::Array(vec![
1.into(), 1.into(),
V::Array(vec![]), V::Map(vec![]),
V::Nil, V::Nil,
"ReceiveMessage".into(), "ReceiveMessage".into(),
V::Array(vec![V::Map(vec![ V::Array(vec![V::Map(vec![
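In the SignalR MessagePack framing, the headers slot is a map rather than an array, which is what the change above corrects by swapping the empty `V::Array` for an empty `V::Map`. Assuming `V` is an alias for `rmpv::Value` (an assumption based on the surrounding code), the difference in isolation:

use rmpv::Value as V;

fn main() {
    // Encodes as the MessagePack map {} — what the headers field should be.
    let headers = V::Map(Vec::new());
    // Encodes as the MessagePack array [] — what was previously being sent.
    let old_headers = V::Array(Vec::new());
    assert!(headers.is_map());
    assert!(!old_headers.is_map());
}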
@@ -379,6 +394,10 @@ pub enum UpdateType {
LogOut = 11, LogOut = 11,
SyncSendCreate = 12,
SyncSendUpdate = 13,
SyncSendDelete = 14,
None = 100, None = 100,
} }
@@ -386,15 +405,17 @@ use rocket::State;
pub type Notify<'a> = State<'a, WebSocketUsers>; pub type Notify<'a> = State<'a, WebSocketUsers>;
pub fn start_notification_server() -> WebSocketUsers { pub fn start_notification_server() -> WebSocketUsers {
let factory = WSFactory::init(); let factory = WsFactory::init();
let users = factory.users.clone(); let users = factory.users.clone();
if CONFIG.websocket_enabled() { if CONFIG.websocket_enabled() {
thread::spawn(move || { thread::spawn(move || {
let mut settings = ws::Settings::default(); let settings = ws::Settings {
settings.max_connections = 500; max_connections: 500,
settings.queue_size = 2; queue_size: 2,
settings.panic_on_internal = false; panic_on_internal: false,
..Default::default()
};
ws::Builder::new() ws::Builder::new()
.with_settings(settings) .with_settings(settings)
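Building the listener settings now uses struct update syntax instead of mutating a default value field by field; any field not named falls back to its `Default` value. The same pattern with a stand-in type (illustrative only, not `ws::Settings` itself):

#[derive(Default)]
struct Settings {
    max_connections: usize,
    queue_size: usize,
    panic_on_internal: bool,
    tcp_nodelay: bool,
}

fn main() {
    let settings = Settings {
        max_connections: 500,
        queue_size: 2,
        panic_on_internal: false,
        // Every field not named above keeps its Default value.
        ..Default::default()
    };
    assert_eq!(settings.tcp_nodelay, bool::default());
}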

Some files were not shown because too many files have changed in this diff.