Mirror of https://github.com/dani-garcia/vaultwarden.git (synced 2025-09-10 18:55:57 +03:00)
Compare commits: 497 commits, fd27759a95 through 12928b832c (the commit table listed only abbreviated SHAs; its author, date, and message columns were empty, so the full list is omitted here).
.dockerignore
@@ -3,6 +3,7 @@ target
|
|||||||
|
|
||||||
# Data folder
|
# Data folder
|
||||||
data
|
data
|
||||||
|
.env
|
||||||
|
|
||||||
# IDE files
|
# IDE files
|
||||||
.vscode
|
.vscode
|
||||||
@@ -10,5 +11,15 @@ data
|
|||||||
*.iml
|
*.iml
|
||||||
|
|
||||||
# Documentation
|
# Documentation
|
||||||
|
.github
|
||||||
*.md
|
*.md
|
||||||
|
*.txt
|
||||||
|
*.yml
|
||||||
|
*.yaml
|
||||||
|
|
||||||
|
# Docker folders
|
||||||
|
hooks
|
||||||
|
tools
|
||||||
|
|
||||||
|
# Web vault
|
||||||
|
web-vault
|
122  .env.template
@@ -1,19 +1,34 @@
|
|||||||
## Bitwarden_RS Configuration File
|
## Bitwarden_RS Configuration File
|
||||||
## Uncomment any of the following lines to change the defaults
|
## Uncomment any of the following lines to change the defaults
|
||||||
|
##
|
||||||
|
## Be aware that most of these settings will be overridden if they were changed
|
||||||
|
## in the admin interface. Those overrides are stored within DATA_FOLDER/config.json .
|
||||||
|
|
||||||
## Main data folder
|
## Main data folder
|
||||||
# DATA_FOLDER=data
|
# DATA_FOLDER=data
|
||||||
|
|
||||||
## Database URL
|
## Database URL
|
||||||
## When using SQLite, this is the path to the DB file, default to %DATA_FOLDER%/db.sqlite3
|
## When using SQLite, this is the path to the DB file, default to %DATA_FOLDER%/db.sqlite3
|
||||||
## When using MySQL, this it is the URL to the DB, including username and password:
|
|
||||||
## Format: mysql://[user[:password]@]host/database_name
|
|
||||||
# DATABASE_URL=data/db.sqlite3
|
# DATABASE_URL=data/db.sqlite3
|
||||||
|
## When using MySQL, specify an appropriate connection URI.
|
||||||
|
## Details: https://docs.diesel.rs/diesel/mysql/struct.MysqlConnection.html
|
||||||
|
# DATABASE_URL=mysql://user:password@host[:port]/database_name
|
||||||
|
## When using PostgreSQL, specify an appropriate connection URI (recommended)
|
||||||
|
## or keyword/value connection string.
|
||||||
|
## Details:
|
||||||
|
## - https://docs.diesel.rs/diesel/pg/struct.PgConnection.html
|
||||||
|
## - https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-CONNSTRING
|
||||||
|
# DATABASE_URL=postgresql://user:password@host[:port]/database_name
|
||||||
|
|
||||||
|
## Database max connections
|
||||||
|
## Define the size of the connection pool used for connecting to the database.
|
||||||
|
# DATABASE_MAX_CONNS=10
|
||||||
|
|
||||||
## Individual folders, these override %DATA_FOLDER%
|
## Individual folders, these override %DATA_FOLDER%
|
||||||
# RSA_KEY_FILENAME=data/rsa_key
|
# RSA_KEY_FILENAME=data/rsa_key
|
||||||
# ICON_CACHE_FOLDER=data/icon_cache
|
# ICON_CACHE_FOLDER=data/icon_cache
|
||||||
# ATTACHMENTS_FOLDER=data/attachments
|
# ATTACHMENTS_FOLDER=data/attachments
|
||||||
|
# SENDS_FOLDER=data/sends
|
||||||
|
|
||||||
## Templates data folder, by default uses embedded templates
|
## Templates data folder, by default uses embedded templates
|
||||||
## Check source code to see the format
|
## Check source code to see the format
|
||||||
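DATABASE_MAX_CONNS above sizes the connection pool that is built from DATABASE_URL. A minimal sketch of such a pool using diesel's r2d2 integration (illustrative only; the helper name is made up and the SQLite backend is assumed):

use diesel::r2d2::{ConnectionManager, Pool};
use diesel::sqlite::SqliteConnection;

// Hypothetical helper: DATABASE_URL becomes the manager's URL and
// DATABASE_MAX_CONNS becomes the pool's max_size.
fn build_pool(database_url: &str, max_conns: u32) -> Pool<ConnectionManager<SqliteConnection>> {
    let manager = ConnectionManager::<SqliteConnection>::new(database_url);
    Pool::builder()
        .max_size(max_conns)
        .build(manager)
        .expect("failed to build database pool")
}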
@@ -21,6 +36,10 @@
|
|||||||
## Automatically reload the templates for every request, slow, use only for development
|
## Automatically reload the templates for every request, slow, use only for development
|
||||||
# RELOAD_TEMPLATES=false
|
# RELOAD_TEMPLATES=false
|
||||||
|
|
||||||
|
## Client IP Header, used to identify the IP of the client, defaults to "X-Client-IP"
|
||||||
|
## Set to the string "none" (without quotes), to disable any headers and just use the remote IP
|
||||||
|
# IP_HEADER=X-Client-IP
|
||||||
|
|
||||||
## Cache time-to-live for successfully obtained icons, in seconds (0 is "forever")
|
## Cache time-to-live for successfully obtained icons, in seconds (0 is "forever")
|
||||||
# ICON_CACHE_TTL=2592000
|
# ICON_CACHE_TTL=2592000
|
||||||
## Cache time-to-live for icons which weren't available, in seconds (0 is "forever")
|
## Cache time-to-live for icons which weren't available, in seconds (0 is "forever")
|
||||||
@@ -37,14 +56,14 @@
|
|||||||
# WEBSOCKET_ADDRESS=0.0.0.0
|
# WEBSOCKET_ADDRESS=0.0.0.0
|
||||||
# WEBSOCKET_PORT=3012
|
# WEBSOCKET_PORT=3012
|
||||||
|
|
||||||
## Enable extended logging
|
## Enable extended logging, which shows timestamps and targets in the logs
|
||||||
## This shows timestamps and allows logging to file and to syslog
|
|
||||||
### To enable logging to file, use the LOG_FILE env variable
|
|
||||||
### To enable syslog, use the USE_SYSLOG env variable
|
|
||||||
# EXTENDED_LOGGING=true
|
# EXTENDED_LOGGING=true
|
||||||
|
|
||||||
|
## Timestamp format used in extended logging.
|
||||||
|
## Format specifiers: https://docs.rs/chrono/latest/chrono/format/strftime
|
||||||
|
# LOG_TIMESTAMP_FORMAT="%Y-%m-%d %H:%M:%S.%3f"
|
||||||
|
|
||||||
## Logging to file
|
## Logging to file
|
||||||
## This requires extended logging
|
|
||||||
## It's recommended to also set 'ROCKET_CLI_COLORS=off'
|
## It's recommended to also set 'ROCKET_CLI_COLORS=off'
|
||||||
# LOG_FILE=/path/to/log
|
# LOG_FILE=/path/to/log
|
||||||
|
|
||||||
@@ -56,7 +75,8 @@
|
|||||||
## Log level
|
## Log level
|
||||||
## Change the verbosity of the log output
|
## Change the verbosity of the log output
|
||||||
## Valid values are "trace", "debug", "info", "warn", "error" and "off"
|
## Valid values are "trace", "debug", "info", "warn", "error" and "off"
|
||||||
## This requires extended logging
|
## Setting it to "trace" or "debug" would also show logs for mounted
|
||||||
|
## routes and static file, websocket and alive requests
|
||||||
# LOG_LEVEL=Info
|
# LOG_LEVEL=Info
|
||||||
|
|
||||||
## Enable WAL for the DB
|
## Enable WAL for the DB
|
||||||
@@ -67,6 +87,10 @@
|
|||||||
## cause performance degradation or might render the service unable to start.
|
## cause performance degradation or might render the service unable to start.
|
||||||
# ENABLE_DB_WAL=true
|
# ENABLE_DB_WAL=true
|
||||||
|
|
||||||
|
## Database connection retries
|
||||||
|
## Number of times to retry the database connection during startup, with 1 second delay between each retry, set to 0 to retry indefinitely
|
||||||
|
# DB_CONNECTION_RETRIES=15
|
||||||
|
|
||||||
## Disable icon downloading
|
## Disable icon downloading
|
||||||
## Set to true to disable icon downloading, this would still serve icons from $ICON_CACHE_FOLDER,
|
## Set to true to disable icon downloading, this would still serve icons from $ICON_CACHE_FOLDER,
|
||||||
## but it won't produce any external network request. Needs to set $ICON_CACHE_TTL to 0,
|
## but it won't produce any external network request. Needs to set $ICON_CACHE_TTL to 0,
|
||||||
@@ -81,10 +105,11 @@
|
|||||||
## Icon blacklist Regex
|
## Icon blacklist Regex
|
||||||
## Any domains or IPs that match this regex won't be fetched by the icon service.
|
## Any domains or IPs that match this regex won't be fetched by the icon service.
|
||||||
## Useful to hide other servers in the local network. Check the WIKI for more details
|
## Useful to hide other servers in the local network. Check the WIKI for more details
|
||||||
# ICON_BLACKLIST_REGEX=192\.168\.1\.[0-9].*^
|
## NOTE: Always enclose this regex within single quotes!
|
||||||
|
# ICON_BLACKLIST_REGEX='^(192\.168\.0\.[0-9]+|192\.168\.1\.[0-9]+)$'
|
||||||
|
|
||||||
## Any IP which is not defined as a global IP will be blacklisted.
|
## Any IP which is not defined as a global IP will be blacklisted.
|
||||||
## Usefull to secure your internal environment: See https://en.wikipedia.org/wiki/Reserved_IP_addresses for a list of IPs which it will block
|
## Useful to secure your internal environment: See https://en.wikipedia.org/wiki/Reserved_IP_addresses for a list of IPs which it will block
|
||||||
# ICON_BLACKLIST_NON_GLOBAL_IPS=true
|
# ICON_BLACKLIST_NON_GLOBAL_IPS=true
|
||||||
|
|
||||||
## Disable 2FA remember
|
## Disable 2FA remember
|
||||||
@@ -92,6 +117,18 @@
|
|||||||
## Note that the checkbox would still be present, but ignored.
|
## Note that the checkbox would still be present, but ignored.
|
||||||
# DISABLE_2FA_REMEMBER=false
|
# DISABLE_2FA_REMEMBER=false
|
||||||
|
|
||||||
|
## Maximum attempts before an email token is reset and a new email will need to be sent.
|
||||||
|
# EMAIL_ATTEMPTS_LIMIT=3
|
||||||
|
|
||||||
|
## Token expiration time
|
||||||
|
## Maximum time in seconds a token is valid. The time the user has to open email client and copy token.
|
||||||
|
# EMAIL_EXPIRATION_TIME=600
|
||||||
|
|
||||||
|
## Email token size
|
||||||
|
## Number of digits in an email token (min: 6, max: 19).
|
||||||
|
## Note that the Bitwarden clients are hardcoded to mention 6 digit codes regardless of this setting!
|
||||||
|
# EMAIL_TOKEN_SIZE=6
|
||||||
|
|
||||||
## Controls if new users can register
|
## Controls if new users can register
|
||||||
# SIGNUPS_ALLOWED=true
|
# SIGNUPS_ALLOWED=true
|
||||||
|
|
||||||
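EMAIL_TOKEN_SIZE above is the number of digits in the emailed token. A rough sketch of generating such a token with the rand crate (hypothetical helper, not code from this repository):

use rand::Rng;

// Generate a purely numeric token of `size` digits (EMAIL_TOKEN_SIZE).
fn generate_email_token(size: usize) -> String {
    let mut rng = rand::thread_rng();
    (0..size)
        .map(|_| char::from(b'0' + rng.gen_range(0..10) as u8))
        .collect()
}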
@@ -113,6 +150,14 @@
|
|||||||
## even if SIGNUPS_ALLOWED is set to false
|
## even if SIGNUPS_ALLOWED is set to false
|
||||||
# SIGNUPS_DOMAINS_WHITELIST=example.com,example.net,example.org
|
# SIGNUPS_DOMAINS_WHITELIST=example.com,example.net,example.org
|
||||||
|
|
||||||
|
## Controls which users can create new orgs.
|
||||||
|
## Blank or 'all' means all users can create orgs (this is the default):
|
||||||
|
# ORG_CREATION_USERS=
|
||||||
|
## 'none' means no users can create orgs:
|
||||||
|
# ORG_CREATION_USERS=none
|
||||||
|
## A comma-separated list means only those users can create orgs:
|
||||||
|
# ORG_CREATION_USERS=admin1@example.com,admin2@example.com
|
||||||
|
|
||||||
## Token for the admin interface, preferably use a long random string
|
## Token for the admin interface, preferably use a long random string
|
||||||
## One option is to use 'openssl rand -base64 48'
|
## One option is to use 'openssl rand -base64 48'
|
||||||
## If not set, the admin panel is disabled
|
## If not set, the admin panel is disabled
|
||||||
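The ORG_CREATION_USERS values above (blank or 'all', 'none', or a comma-separated list) reduce to a simple membership check. A hedged sketch of that logic (the function is hypothetical):

// Interpret ORG_CREATION_USERS for a given user email.
fn user_may_create_orgs(setting: &str, email: &str) -> bool {
    match setting.trim() {
        "" | "all" => true,
        "none" => false,
        list => list
            .split(',')
            .any(|entry| entry.trim().eq_ignore_ascii_case(email)),
    }
}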
@@ -124,6 +169,16 @@
|
|||||||
|
|
||||||
## Invitations org admins to invite users, even when signups are disabled
|
## Invitations org admins to invite users, even when signups are disabled
|
||||||
# INVITATIONS_ALLOWED=true
|
# INVITATIONS_ALLOWED=true
|
||||||
|
## Name shown in the invitation emails that don't come from a specific organization
|
||||||
|
# INVITATION_ORG_NAME=Bitwarden_RS
|
||||||
|
|
||||||
|
## Per-organization attachment limit (KB)
|
||||||
|
## Limit in kilobytes for an organization's attachments; once the limit is exceeded it won't be possible to upload more
|
||||||
|
# ORG_ATTACHMENT_LIMIT=
|
||||||
|
## Per-user attachment limit (KB).
|
||||||
|
## Limit in kilobytes for a user's attachments; once the limit is exceeded it won't be possible to upload more
|
||||||
|
# USER_ATTACHMENT_LIMIT=
|
||||||
|
|
||||||
|
|
||||||
## Controls the PBKDF2 password iterations to apply on the server
|
## Controls the PBKDF2 password iterations to apply on the server
|
||||||
## The change only applies when the password is changed
|
## The change only applies when the password is changed
|
||||||
@@ -139,6 +194,13 @@
|
|||||||
## For U2F to work, the server must use HTTPS, you can use Let's Encrypt for free certs
|
## For U2F to work, the server must use HTTPS, you can use Let's Encrypt for free certs
|
||||||
# DOMAIN=https://bw.domain.tld:8443
|
# DOMAIN=https://bw.domain.tld:8443
|
||||||
|
|
||||||
|
## Allowed iframe ancestors (Know the risks!)
|
||||||
|
## https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Security-Policy/frame-ancestors
|
||||||
|
## Allows other domains to embed the web vault into an iframe, useful for embedding into secure intranets
|
||||||
|
## This adds the configured value to the 'Content-Security-Policy' headers 'frame-ancestors' value.
|
||||||
|
## Multiple values must be separated with a whitespace.
|
||||||
|
# ALLOWED_IFRAME_ANCESTORS=
|
||||||
|
|
||||||
## Yubico (Yubikey) Settings
|
## Yubico (Yubikey) Settings
|
||||||
## Set your Client ID and Secret Key for Yubikey OTP
|
## Set your Client ID and Secret Key for Yubikey OTP
|
||||||
## You can generate it here: https://upgrade.yubico.com/getapikey/
|
## You can generate it here: https://upgrade.yubico.com/getapikey/
|
||||||
@@ -182,11 +244,45 @@
|
|||||||
# SMTP_HOST=smtp.domain.tld
|
# SMTP_HOST=smtp.domain.tld
|
||||||
# SMTP_FROM=bitwarden-rs@domain.tld
|
# SMTP_FROM=bitwarden-rs@domain.tld
|
||||||
# SMTP_FROM_NAME=Bitwarden_RS
|
# SMTP_FROM_NAME=Bitwarden_RS
|
||||||
# SMTP_PORT=587
|
# SMTP_PORT=587 # Ports 587 (submission) and 25 (smtp) are standard without encryption and with encryption via STARTTLS (Explicit TLS). Port 465 is outdated and used with Implicit TLS.
|
||||||
# SMTP_SSL=true
|
# SMTP_SSL=true # (Explicit) - By default this configures Explicit STARTTLS: it upgrades an insecure connection to a secure one, unless SMTP_EXPLICIT_TLS is set to true. Port 587 or 25 is the default here.
|
||||||
|
# SMTP_EXPLICIT_TLS=true # (Implicit) - N.B. This variable configures Implicit TLS. It's currently mislabelled (see bug #851) - SMTP_SSL needs to be set to true for this option to work. Usually port 465 is used here.
|
||||||
# SMTP_USERNAME=username
|
# SMTP_USERNAME=username
|
||||||
# SMTP_PASSWORD=password
|
# SMTP_PASSWORD=password
|
||||||
# SMTP_AUTH_MECHANISM="Plain"
|
|
||||||
# SMTP_TIMEOUT=15
|
# SMTP_TIMEOUT=15
|
||||||
|
|
||||||
|
## Defaults for SSL is "Plain" and "Login" and nothing for Non-SSL connections.
|
||||||
|
## Possible values: ["Plain", "Login", "Xoauth2"].
|
||||||
|
## Multiple options need to be separated by a comma ','.
|
||||||
|
# SMTP_AUTH_MECHANISM="Plain"
|
||||||
|
|
||||||
|
## Server name sent during the SMTP HELO
|
||||||
|
## By default this value is the machine's hostname,
|
||||||
|
## but might need to be changed in case it trips some anti-spam filters
|
||||||
|
# HELO_NAME=
|
||||||
|
|
||||||
|
## SMTP debugging
|
||||||
|
## When set to true this will output very detailed SMTP messages.
|
||||||
|
## WARNING: This could contain sensitive information like passwords and usernames! Only enable this during troubleshooting!
|
||||||
|
# SMTP_DEBUG=false
|
||||||
|
|
||||||
|
## Accept Invalid Hostnames
|
||||||
|
## DANGEROUS: This option introduces significant vulnerabilities to man-in-the-middle attacks!
|
||||||
|
## Only use this as a last resort if you are not able to use a valid certificate.
|
||||||
|
# SMTP_ACCEPT_INVALID_HOSTNAMES=false
|
||||||
|
|
||||||
|
## Accept Invalid Certificates
|
||||||
|
## DANGEROUS: This option introduces significant vulnerabilities to man-in-the-middle attacks!
|
||||||
|
## Only use this as a last resort if you are not able to use a valid certificate.
|
||||||
|
## If the Certificate is valid but the hostname doesn't match, please use SMTP_ACCEPT_INVALID_HOSTNAMES instead.
|
||||||
|
# SMTP_ACCEPT_INVALID_CERTS=false
|
||||||
|
|
||||||
|
## Require new device emails. When a user logs in an email is required to be sent.
|
||||||
|
## If sending the email fails the login attempt will fail!!
|
||||||
|
# REQUIRE_DEVICE_EMAIL=false
|
||||||
|
|
||||||
|
## HIBP Api Key
|
||||||
|
## HaveIBeenPwned API Key, request it here: https://haveibeenpwned.com/API/Key
|
||||||
|
# HIBP_API_KEY=
|
||||||
|
|
||||||
# vim: syntax=ini
|
# vim: syntax=ini
|
||||||
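The SMTP_SSL and SMTP_EXPLICIT_TLS comments above describe three effective connection modes. A small sketch of that mapping (illustration only; the enum and function are made up, not code from this repository):

enum SmtpSecurity {
    Plain,       // SMTP_SSL=false: unencrypted (ports 25/587)
    StartTls,    // SMTP_SSL=true: "Explicit" STARTTLS upgrade (ports 587/25)
    ImplicitTls, // SMTP_SSL=true and SMTP_EXPLICIT_TLS=true: "Implicit" TLS wrapper (port 465)
}

fn smtp_security(ssl: bool, explicit_tls: bool) -> SmtpSecurity {
    match (ssl, explicit_tls) {
        (true, true) => SmtpSecurity::ImplicitTls,
        (true, false) => SmtpSecurity::StartTls,
        (false, _) => SmtpSecurity::Plain,
    }
}

fn main() {
    // Example: the combination recommended for port 465 maps to Implicit TLS.
    assert!(matches!(smtp_security(true, true), SmtpSecurity::ImplicitTls));
}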
|
3  .gitattributes (vendored, new file)
@@ -0,0 +1,3 @@
+# Ignore vendored scripts in GitHub stats
+src/static/* linguist-vendored
+
1  .github/FUNDING.yml (vendored)
@@ -1 +1,2 @@
 github: dani-garcia
+custom: ["https://paypal.me/DaniGG"]
66  .github/ISSUE_TEMPLATE/bug_report.md (vendored, new file)
@@ -0,0 +1,66 @@
|
|||||||
|
---
|
||||||
|
name: Bug report
|
||||||
|
about: Use this ONLY for bugs in bitwarden_rs itself. Use the Discourse forum (link below) to request features or get help with usage/configuration. If in doubt, use the forum.
|
||||||
|
title: ''
|
||||||
|
labels: ''
|
||||||
|
assignees: ''
|
||||||
|
|
||||||
|
---
|
||||||
|
<!--
|
||||||
|
# ###
|
||||||
|
NOTE: Please update to the latest version of bitwarden_rs before reporting an issue!
|
||||||
|
This saves you and us a lot of time and troubleshooting.
|
||||||
|
See:
|
||||||
|
* https://github.com/dani-garcia/bitwarden_rs/issues/1180
|
||||||
|
* https://github.com/dani-garcia/bitwarden_rs/wiki/Updating-the-bitwarden-image
|
||||||
|
# ###
|
||||||
|
-->
|
||||||
|
|
||||||
|
<!--
|
||||||
|
Please fill out the following template to make solving your problem easier and faster for us.
|
||||||
|
This is only a guideline. If you think that parts are unnecessary for your issue, feel free to remove them.
|
||||||
|
|
||||||
|
Remember to hide/redact personal or confidential information,
|
||||||
|
such as passwords, IP addresses, and DNS names as appropriate.
|
||||||
|
-->
|
||||||
|
|
||||||
|
### Subject of the issue
|
||||||
|
<!-- Describe your issue here. -->
|
||||||
|
|
||||||
|
### Deployment environment
|
||||||
|
|
||||||
|
<!--
|
||||||
|
=========================================================================================
|
||||||
|
Preferably, use the `Generate Support String` button on the admin page's Diagnostics tab.
|
||||||
|
That will auto-generate most of the info requested in this section.
|
||||||
|
=========================================================================================
|
||||||
|
-->
|
||||||
|
|
||||||
|
<!-- The version number, obtained from the logs (at startup) or the admin diagnostics page -->
|
||||||
|
<!-- This is NOT the version number shown on the web vault, which is versioned separately from bitwarden_rs -->
|
||||||
|
<!-- Remember to check if your issue exists on the latest version first! -->
|
||||||
|
* bitwarden_rs version:
|
||||||
|
|
||||||
|
<!-- How the server was installed: Docker image, OS package, built from source, etc. -->
|
||||||
|
* Install method:
|
||||||
|
|
||||||
|
* Clients used: <!-- web vault, desktop, Android, iOS, etc. (if applicable) -->
|
||||||
|
|
||||||
|
* Reverse proxy and version: <!-- if applicable -->
|
||||||
|
|
||||||
|
* MySQL/MariaDB or PostgreSQL version: <!-- if applicable -->
|
||||||
|
|
||||||
|
* Other relevant details:
|
||||||
|
|
||||||
|
### Steps to reproduce
|
||||||
|
<!-- Tell us how to reproduce this issue. What parameters did you set (differently from the defaults)
|
||||||
|
and how did you start bitwarden_rs? -->
|
||||||
|
|
||||||
|
### Expected behaviour
|
||||||
|
<!-- Tell us what you expected to happen -->
|
||||||
|
|
||||||
|
### Actual behaviour
|
||||||
|
<!-- Tell us what actually happened -->
|
||||||
|
|
||||||
|
### Troubleshooting data
|
||||||
|
<!-- Share any log files, screenshots, or other relevant troubleshooting data -->
|
8  .github/ISSUE_TEMPLATE/config.yml (vendored, new file)
@@ -0,0 +1,8 @@
+blank_issues_enabled: false
+contact_links:
+  - name: Discourse forum for bitwarden_rs
+    url: https://bitwardenrs.discourse.group/
+    about: Use this forum to request features or get help with usage/configuration.
+  - name: GitHub Discussions for bitwarden_rs
+    url: https://github.com/dani-garcia/bitwarden_rs/discussions
+    about: An alternative to the Discourse forum, if this is easier for you.
144  .github/workflows/build.yml (vendored, new file)
@@ -0,0 +1,144 @@
|
|||||||
|
name: Build
|
||||||
|
|
||||||
|
on:
|
||||||
|
push:
|
||||||
|
# Ignore when there are only changes done to one of these paths
|
||||||
|
paths-ignore:
|
||||||
|
- "**.md"
|
||||||
|
- "**.txt"
|
||||||
|
- "azure-pipelines.yml"
|
||||||
|
- "docker/**"
|
||||||
|
- "hooks/**"
|
||||||
|
- "tools/**"
|
||||||
|
pull_request:
|
||||||
|
# Ignore when there are only changes done to one of these paths
|
||||||
|
paths-ignore:
|
||||||
|
- "**.md"
|
||||||
|
- "**.txt"
|
||||||
|
- "azure-pipelines.yml"
|
||||||
|
- "docker/**"
|
||||||
|
- "hooks/**"
|
||||||
|
- "tools/**"
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
build:
|
||||||
|
strategy:
|
||||||
|
fail-fast: false
|
||||||
|
matrix:
|
||||||
|
channel:
|
||||||
|
- nightly
|
||||||
|
# - stable
|
||||||
|
target-triple:
|
||||||
|
- x86_64-unknown-linux-gnu
|
||||||
|
# - x86_64-unknown-linux-musl
|
||||||
|
include:
|
||||||
|
- target-triple: x86_64-unknown-linux-gnu
|
||||||
|
host-triple: x86_64-unknown-linux-gnu
|
||||||
|
features: "sqlite,mysql,postgresql"
|
||||||
|
channel: nightly
|
||||||
|
os: ubuntu-18.04
|
||||||
|
ext:
|
||||||
|
# - target-triple: x86_64-unknown-linux-gnu
|
||||||
|
# host-triple: x86_64-unknown-linux-gnu
|
||||||
|
# features: "sqlite,mysql,postgresql"
|
||||||
|
# channel: stable
|
||||||
|
# os: ubuntu-18.04
|
||||||
|
# ext:
|
||||||
|
# - target-triple: x86_64-unknown-linux-musl
|
||||||
|
# host-triple: x86_64-unknown-linux-gnu
|
||||||
|
# features: "sqlite,postgresql"
|
||||||
|
# channel: nightly
|
||||||
|
# os: ubuntu-18.04
|
||||||
|
# ext:
|
||||||
|
# - target-triple: x86_64-unknown-linux-musl
|
||||||
|
# host-triple: x86_64-unknown-linux-gnu
|
||||||
|
# features: "sqlite,postgresql"
|
||||||
|
# channel: stable
|
||||||
|
# os: ubuntu-18.04
|
||||||
|
# ext:
|
||||||
|
|
||||||
|
name: Building ${{ matrix.channel }}-${{ matrix.target-triple }}
|
||||||
|
runs-on: ${{ matrix.os }}
|
||||||
|
steps:
|
||||||
|
# Checkout the repo
|
||||||
|
- name: Checkout
|
||||||
|
uses: actions/checkout@v2
|
||||||
|
# End Checkout the repo
|
||||||
|
|
||||||
|
|
||||||
|
# Install musl-tools when needed
|
||||||
|
- name: Install musl tools
|
||||||
|
run: sudo apt-get update && sudo apt-get install -y --no-install-recommends musl-dev musl-tools cmake
|
||||||
|
if: matrix.target-triple == 'x86_64-unknown-linux-musl'
|
||||||
|
# End Install musl-tools when needed
|
||||||
|
|
||||||
|
|
||||||
|
# Install dependencies
|
||||||
|
- name: Install dependencies Ubuntu
|
||||||
|
run: sudo apt-get update && sudo apt-get install -y --no-install-recommends openssl sqlite build-essential libmariadb-dev-compat libpq-dev libssl-dev pkgconf
|
||||||
|
if: startsWith( matrix.os, 'ubuntu' )
|
||||||
|
# End Install dependencies
|
||||||
|
|
||||||
|
|
||||||
|
# Enable Rust Caching
|
||||||
|
- uses: Swatinem/rust-cache@v1
|
||||||
|
# End Enable Rust Caching
|
||||||
|
|
||||||
|
|
||||||
|
# Uses the rust-toolchain file to determine version
|
||||||
|
- name: 'Install ${{ matrix.channel }}-${{ matrix.host-triple }} for target: ${{ matrix.target-triple }}'
|
||||||
|
uses: actions-rs/toolchain@v1
|
||||||
|
with:
|
||||||
|
profile: minimal
|
||||||
|
target: ${{ matrix.target-triple }}
|
||||||
|
components: clippy
|
||||||
|
# End Uses the rust-toolchain file to determine version
|
||||||
|
|
||||||
|
|
||||||
|
# Run cargo tests (In release mode to speed up future builds)
|
||||||
|
- name: '`cargo test --release --features ${{ matrix.features }} --target ${{ matrix.target-triple }}`'
|
||||||
|
uses: actions-rs/cargo@v1
|
||||||
|
with:
|
||||||
|
command: test
|
||||||
|
args: --release --features ${{ matrix.features }} --target ${{ matrix.target-triple }}
|
||||||
|
# End Run cargo tests
|
||||||
|
|
||||||
|
|
||||||
|
# Run cargo clippy (In release mode to speed up future builds)
|
||||||
|
- name: '`cargo clippy --release --features ${{ matrix.features }} --target ${{ matrix.target-triple }}`'
|
||||||
|
uses: actions-rs/cargo@v1
|
||||||
|
with:
|
||||||
|
command: clippy
|
||||||
|
args: --release --features ${{ matrix.features }} --target ${{ matrix.target-triple }}
|
||||||
|
# End Run cargo clippy
|
||||||
|
|
||||||
|
|
||||||
|
# Build the binary
|
||||||
|
- name: '`cargo build --release --features ${{ matrix.features }} --target ${{ matrix.target-triple }}`'
|
||||||
|
uses: actions-rs/cargo@v1
|
||||||
|
with:
|
||||||
|
command: build
|
||||||
|
args: --release --features ${{ matrix.features }} --target ${{ matrix.target-triple }}
|
||||||
|
# End Build the binary
|
||||||
|
|
||||||
|
|
||||||
|
# Upload artifact to Github Actions
|
||||||
|
- name: Upload artifact
|
||||||
|
uses: actions/upload-artifact@v2
|
||||||
|
with:
|
||||||
|
name: bitwarden_rs-${{ matrix.target-triple }}${{ matrix.ext }}
|
||||||
|
path: target/${{ matrix.target-triple }}/release/bitwarden_rs${{ matrix.ext }}
|
||||||
|
# End Upload artifact to Github Actions
|
||||||
|
|
||||||
|
|
||||||
|
## This is not used at the moment
|
||||||
|
## We could start using this when we can build static binaries
|
||||||
|
# Upload to github actions release
|
||||||
|
# - name: Release
|
||||||
|
# uses: Shopify/upload-to-release@1
|
||||||
|
# if: startsWith(github.ref, 'refs/tags/')
|
||||||
|
# with:
|
||||||
|
# name: bitwarden_rs-${{ matrix.target-triple }}${{ matrix.ext }}
|
||||||
|
# path: target/${{ matrix.target-triple }}/release/bitwarden_rs${{ matrix.ext }}
|
||||||
|
# repo-token: ${{ secrets.GITHUB_TOKEN }}
|
||||||
|
# End Upload to github actions release
|
34  .github/workflows/hadolint.yml (vendored, new file)
@@ -0,0 +1,34 @@
|
|||||||
|
name: Hadolint
|
||||||
|
|
||||||
|
on:
|
||||||
|
pull_request:
|
||||||
|
# Ignore when there are only changes done to one of these paths
|
||||||
|
paths:
|
||||||
|
- "docker/**"
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
hadolint:
|
||||||
|
name: Validate Dockerfile syntax
|
||||||
|
runs-on: ubuntu-20.04
|
||||||
|
steps:
|
||||||
|
# Checkout the repo
|
||||||
|
- name: Checkout
|
||||||
|
uses: actions/checkout@v2
|
||||||
|
# End Checkout the repo
|
||||||
|
|
||||||
|
|
||||||
|
# Download hadolint
|
||||||
|
- name: Download hadolint
|
||||||
|
shell: bash
|
||||||
|
run: |
|
||||||
|
sudo curl -L https://github.com/hadolint/hadolint/releases/download/v$HADOLINT_VERSION/hadolint-$(uname -s)-$(uname -m) -o /usr/local/bin/hadolint && \
|
||||||
|
sudo chmod +x /usr/local/bin/hadolint
|
||||||
|
env:
|
||||||
|
HADOLINT_VERSION: 1.19.0
|
||||||
|
# End Download hadolint
|
||||||
|
|
||||||
|
# Test Dockerfiles
|
||||||
|
- name: Run hadolint
|
||||||
|
shell: bash
|
||||||
|
run: git ls-files --exclude='docker/*/Dockerfile*' --ignored | xargs hadolint
|
||||||
|
# End Test Dockerfiles
|
21  .travis.yml
@@ -1,21 +0,0 @@
-dist: xenial
-
-env:
-  global:
-  - HADOLINT_VERSION=1.17.1
-
-language: rust
-rust: nightly
-cache: cargo
-
-before_install:
-- sudo curl -L https://github.com/hadolint/hadolint/releases/download/v$HADOLINT_VERSION/hadolint-$(uname -s)-$(uname -m) -o /usr/local/bin/hadolint
-- sudo chmod +rx /usr/local/bin/hadolint
-- rustup set profile minimal
-
-# Nothing to install
-install: true
-script:
-- git ls-files --exclude='Dockerfile*' --ignored | xargs --max-lines=1 hadolint
-- cargo build --features "sqlite"
-- cargo build --features "mysql"
3439  Cargo.lock (generated): file diff suppressed because it is too large.
102  Cargo.toml
@@ -14,8 +14,14 @@ build = "build.rs"
|
|||||||
# Empty to keep compatibility, prefer to set USE_SYSLOG=true
|
# Empty to keep compatibility, prefer to set USE_SYSLOG=true
|
||||||
enable_syslog = []
|
enable_syslog = []
|
||||||
mysql = ["diesel/mysql", "diesel_migrations/mysql"]
|
mysql = ["diesel/mysql", "diesel_migrations/mysql"]
|
||||||
postgresql = ["diesel/postgres", "diesel_migrations/postgres", "openssl"]
|
postgresql = ["diesel/postgres", "diesel_migrations/postgres"]
|
||||||
sqlite = ["diesel/sqlite", "diesel_migrations/sqlite", "libsqlite3-sys"]
|
sqlite = ["diesel/sqlite", "diesel_migrations/sqlite", "libsqlite3-sys"]
|
||||||
|
# Enable to use a vendored and statically linked openssl
|
||||||
|
vendored_openssl = ["openssl/vendored"]
|
||||||
|
|
||||||
|
# Enable unstable features, requires nightly
|
||||||
|
# Currently only used to enable Rust's official IP support
|
||||||
|
unstable = []
|
||||||
|
|
||||||
[target."cfg(not(windows))".dependencies]
|
[target."cfg(not(windows))".dependencies]
|
||||||
syslog = "4.0.1"
|
syslog = "4.0.1"
|
||||||
@@ -26,104 +32,106 @@ rocket = { version = "0.5.0-dev", features = ["tls"], default-features = false }
|
|||||||
rocket_contrib = "0.5.0-dev"
|
rocket_contrib = "0.5.0-dev"
|
||||||
|
|
||||||
# HTTP client
|
# HTTP client
|
||||||
reqwest = "0.9.22"
|
reqwest = { version = "0.11.2", features = ["blocking", "json"] }
|
||||||
|
|
||||||
# multipart/form-data support
|
# multipart/form-data support
|
||||||
multipart = { version = "0.16.1", features = ["server"], default-features = false }
|
multipart = { version = "0.17.1", features = ["server"], default-features = false }
|
||||||
|
|
||||||
# WebSockets library
|
# WebSockets library
|
||||||
ws = "0.9.1"
|
ws = { version = "0.10.0", package = "parity-ws" }
|
||||||
|
|
||||||
# MessagePack library
|
# MessagePack library
|
||||||
rmpv = "0.4.2"
|
rmpv = "0.4.7"
|
||||||
|
|
||||||
# Concurrent hashmap implementation
|
# Concurrent hashmap implementation
|
||||||
chashmap = "2.2.2"
|
chashmap = "2.2.2"
|
||||||
|
|
||||||
# A generic serialization/deserialization framework
|
# A generic serialization/deserialization framework
|
||||||
serde = "1.0.103"
|
serde = { version = "1.0.125", features = ["derive"] }
|
||||||
serde_derive = "1.0.103"
|
serde_json = "1.0.64"
|
||||||
serde_json = "1.0.42"
|
|
||||||
|
|
||||||
# Logging
|
# Logging
|
||||||
log = "0.4.8"
|
log = "0.4.14"
|
||||||
fern = { version = "0.5.9", features = ["syslog-4"] }
|
fern = { version = "0.6.0", features = ["syslog-4"] }
|
||||||
|
|
||||||
# A safe, extensible ORM and Query builder
|
# A safe, extensible ORM and Query builder
|
||||||
diesel = { version = "1.4.3", features = [ "chrono", "r2d2"] }
|
diesel = { version = "1.4.6", features = [ "chrono", "r2d2"] }
|
||||||
diesel_migrations = "1.4.0"
|
diesel_migrations = "1.4.0"
|
||||||
|
|
||||||
# Bundled SQLite
|
# Bundled SQLite
|
||||||
libsqlite3-sys = { version = "0.16.0", features = ["bundled"], optional = true }
|
libsqlite3-sys = { version = "0.20.1", features = ["bundled"], optional = true }
|
||||||
|
|
||||||
# Crypto library
|
# Crypto-related libraries
|
||||||
ring = "0.14.6"
|
rand = "0.8.3"
|
||||||
|
ring = "0.16.20"
|
||||||
|
|
||||||
# UUID generation
|
# UUID generation
|
||||||
uuid = { version = "0.8.1", features = ["v4"] }
|
uuid = { version = "0.8.2", features = ["v4"] }
|
||||||
|
|
||||||
# Date and time library for Rust
|
# Date and time libraries
|
||||||
chrono = "0.4.10"
|
chrono = { version = "0.4.19", features = ["serde"] }
|
||||||
|
chrono-tz = "0.5.3"
|
||||||
|
time = "0.2.26"
|
||||||
|
|
||||||
# TOTP library
|
# TOTP library
|
||||||
oath = "0.10.2"
|
oath = "0.10.2"
|
||||||
|
|
||||||
# Data encoding library
|
# Data encoding library
|
||||||
data-encoding = "2.1.2"
|
data-encoding = "2.3.2"
|
||||||
|
|
||||||
# JWT library
|
# JWT library
|
||||||
jsonwebtoken = "6.0.1"
|
jsonwebtoken = "7.2.0"
|
||||||
|
|
||||||
# U2F library
|
# U2F library
|
||||||
u2f = "0.1.6"
|
u2f = "0.2.0"
|
||||||
|
|
||||||
# Yubico Library
|
# Yubico Library
|
||||||
yubico = { version = "0.7.1", features = ["online-tokio"], default-features = false }
|
yubico = { version = "0.10.0", features = ["online-tokio"], default-features = false }
|
||||||
|
|
||||||
# A `dotenv` implementation for Rust
|
# A `dotenv` implementation for Rust
|
||||||
dotenv = { version = "0.15.0", default-features = false }
|
dotenv = { version = "0.15.0", default-features = false }
|
||||||
|
|
||||||
# Lazy static macro
|
# Lazy initialization
|
||||||
lazy_static = "1.4.0"
|
once_cell = "1.7.2"
|
||||||
|
|
||||||
# More derives
|
|
||||||
derive_more = "0.99.2"
|
|
||||||
|
|
||||||
# Numerical libraries
|
# Numerical libraries
|
||||||
num-traits = "0.2.10"
|
num-traits = "0.2.14"
|
||||||
num-derive = "0.3.0"
|
num-derive = "0.3.3"
|
||||||
|
|
||||||
# Email libraries
|
# Email libraries
|
||||||
lettre = "0.9.2"
|
lettre = { version = "0.10.0-beta.3", features = ["smtp-transport", "builder", "serde", "native-tls", "hostname", "tracing"], default-features = false }
|
||||||
lettre_email = "0.9.2"
|
newline-converter = "0.2.0"
|
||||||
native-tls = "0.2.3"
|
|
||||||
quoted_printable = "0.4.1"
|
|
||||||
|
|
||||||
# Template library
|
# Template library
|
||||||
handlebars = "2.0.2"
|
handlebars = { version = "3.5.3", features = ["dir_source"] }
|
||||||
|
|
||||||
# For favicon extraction from main website
|
# For favicon extraction from main website
|
||||||
soup = "0.4.1"
|
html5ever = "0.25.1"
|
||||||
regex = "1.3.1"
|
markup5ever_rcdom = "0.1.0"
|
||||||
|
regex = { version = "1.4.5", features = ["std", "perf"], default-features = false }
|
||||||
data-url = "0.1.0"
|
data-url = "0.1.0"
|
||||||
|
|
||||||
# Required for SSL support for PostgreSQL
|
# Used by U2F, JWT and Postgres
|
||||||
openssl = { version = "0.10.26", optional = true }
|
openssl = "0.10.33"
|
||||||
|
|
||||||
# URL encoding library
|
# URL encoding library
|
||||||
percent-encoding = "2.1.0"
|
percent-encoding = "2.1.0"
|
||||||
|
# Punycode conversion
|
||||||
|
idna = "0.2.2"
|
||||||
|
|
||||||
|
# CLI argument parsing
|
||||||
|
pico-args = "0.4.0"
|
||||||
|
|
||||||
|
# Logging panics to logfile instead of only stderr
|
||||||
|
backtrace = "0.3.56"
|
||||||
|
|
||||||
|
# Macro ident concatenation
|
||||||
|
paste = "1.0.5"
|
||||||
|
|
||||||
[patch.crates-io]
|
[patch.crates-io]
|
||||||
# Add support for Timestamp type
|
|
||||||
rmp = { git = 'https://github.com/3Hren/msgpack-rust', rev = 'd6c6c672e470341207ed9feb69b56322b5597a11' }
|
|
||||||
|
|
||||||
# Use newest ring
|
# Use newest ring
|
||||||
rocket = { git = 'https://github.com/SergioBenitez/Rocket', rev = 'b95b6765e1cc8be7c1e7eaef8a9d9ad940b0ac13' }
|
rocket = { git = 'https://github.com/SergioBenitez/Rocket', rev = '263e39b5b429de1913ce7e3036575a7b4d88b6d7' }
|
||||||
rocket_contrib = { git = 'https://github.com/SergioBenitez/Rocket', rev = 'b95b6765e1cc8be7c1e7eaef8a9d9ad940b0ac13' }
|
rocket_contrib = { git = 'https://github.com/SergioBenitez/Rocket', rev = '263e39b5b429de1913ce7e3036575a7b4d88b6d7' }
|
||||||
|
|
||||||
# Use git version for timeout fix #706
|
|
||||||
lettre = { git = 'https://github.com/lettre/lettre', rev = '24d694db3be017d82b1cdc8bf9da601420b31bb0' }
|
|
||||||
lettre_email = { git = 'https://github.com/lettre/lettre', rev = '24d694db3be017d82b1cdc8bf9da601420b31bb0' }
|
|
||||||
|
|
||||||
# For favicon extraction from main website
|
# For favicon extraction from main website
|
||||||
data-url = { git = 'https://github.com/servo/rust-url', package="data-url", rev = '7f1bd6ce1c2fde599a757302a843a60e714c5f72' }
|
data-url = { git = 'https://github.com/servo/rust-url', package="data-url", rev = '540ede02d0771824c0c80ff9f57fe8eff38b1291' }
|
||||||
|
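The new `unstable` feature in the [features] section above is described as enabling Rust's official IP support, which is nightly-only; the .env.template's ICON_BLACKLIST_NON_GLOBAL_IPS option describes the kind of check this covers. A rough sketch of the gate it implies (illustrative; the fallback function is hypothetical):

#![cfg_attr(feature = "unstable", feature(ip))]

use std::net::IpAddr;

// With the nightly-only `ip` feature, the standard library can tell
// whether an address is globally routable.
#[cfg(feature = "unstable")]
fn is_global(ip: IpAddr) -> bool {
    ip.is_global()
}

// On stable, a hand-written check against the reserved ranges would be
// needed instead; this stub just accepts everything.
#[cfg(not(feature = "unstable"))]
fn is_global(_ip: IpAddr) -> bool {
    true
}

fn main() {
    let ip: IpAddr = "192.168.1.10".parse().unwrap();
    println!("{} is global: {}", ip, is_global(ip));
}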
@@ -1 +1 @@
-docker/amd64/sqlite/Dockerfile
+docker/amd64/Dockerfile
11  README.md
@@ -13,7 +13,7 @@ Image is based on [Rust implementation of Bitwarden API](https://github.com/dani
|
|||||||
|
|
||||||
**This project is not associated with the [Bitwarden](https://bitwarden.com/) project nor 8bit Solutions LLC.**
|
**This project is not associated with the [Bitwarden](https://bitwarden.com/) project nor 8bit Solutions LLC.**
|
||||||
|
|
||||||
#### ⚠️**IMPORTANT**⚠️: When using this server, please report any Bitwarden related bug-reports or suggestions [here](https://github.com/dani-garcia/bitwarden_rs/issues/new), regardless of whatever clients you are using (mobile, desktop, browser...). DO NOT use the official support channels.
|
#### ⚠️**IMPORTANT**⚠️: When using this server, please report any bugs or suggestions to us directly (look at the bottom of this page for ways to get in touch), regardless of whatever clients you are using (mobile, desktop, browser...). DO NOT use the official support channels.
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
@@ -21,14 +21,13 @@ Image is based on [Rust implementation of Bitwarden API](https://github.com/dani
|
|||||||
|
|
||||||
Basically full implementation of Bitwarden API is provided including:
|
Basically full implementation of Bitwarden API is provided including:
|
||||||
|
|
||||||
* Basic single user functionality
|
|
||||||
* Organizations support
|
* Organizations support
|
||||||
* Attachments
|
* Attachments
|
||||||
* Vault API support
|
* Vault API support
|
||||||
* Serving the static files for Vault interface
|
* Serving the static files for Vault interface
|
||||||
* Website icons API
|
* Website icons API
|
||||||
* Authenticator and U2F support
|
* Authenticator and U2F support
|
||||||
* YubiKey OTP
|
* YubiKey and Duo support
|
||||||
|
|
||||||
## Installation
|
## Installation
|
||||||
Pull the docker image and mount a volume from the host for persistent storage:
|
Pull the docker image and mount a volume from the host for persistent storage:
|
||||||
@@ -49,12 +48,14 @@ If you have an available domain name, you can get HTTPS certificates with [Let's
|
|||||||
See the [bitwarden_rs wiki](https://github.com/dani-garcia/bitwarden_rs/wiki) for more information on how to configure and run the bitwarden_rs server.
|
See the [bitwarden_rs wiki](https://github.com/dani-garcia/bitwarden_rs/wiki) for more information on how to configure and run the bitwarden_rs server.
|
||||||
|
|
||||||
## Get in touch
|
## Get in touch
|
||||||
|
To ask a question, offer suggestions or new features or to get help configuring or installing the software, please [use the forum](https://bitwardenrs.discourse.group/).
|
||||||
|
|
||||||
To ask a question, [raising an issue](https://github.com/dani-garcia/bitwarden_rs/issues/new) is fine. Please also report any bugs spotted here.
|
If you spot any bugs or crashes with bitwarden_rs itself, please [create an issue](https://github.com/dani-garcia/bitwarden_rs/issues/). Make sure there aren't any similar issues open, though!
|
||||||
|
|
||||||
If you prefer to chat, we're usually hanging around at [#bitwarden_rs:matrix.org](https://matrix.to/#/#bitwarden_rs:matrix.org) room on Matrix. Feel free to join us!
|
If you prefer to chat, we're usually hanging around at [#bitwarden_rs:matrix.org](https://matrix.to/#/#bitwarden_rs:matrix.org) room on Matrix. Feel free to join us!
|
||||||
|
|
||||||
### Sponsors
|
### Sponsors
|
||||||
Thanks for your contribution to the project!
|
Thanks for your contribution to the project!
|
||||||
|
|
||||||
- [@Skaronator](https://github.com/Skaronator)
|
- [@ChonoN](https://github.com/ChonoN)
|
||||||
|
- [@themightychris](https://github.com/themightychris)
|
||||||
|
azure-pipelines.yml
@@ -1,5 +1,5 @@
 pool:
-  vmImage: 'Ubuntu-16.04'
+  vmImage: 'Ubuntu-18.04'
 
 steps:
 - script: |
@@ -10,16 +10,13 @@ steps:
 
 - script: |
     sudo apt-get update
-    sudo apt-get install -y libmysql++-dev
-  displayName: Install libmysql
+    sudo apt-get install -y --no-install-recommends build-essential libmariadb-dev-compat libpq-dev libssl-dev pkgconf
+  displayName: 'Install build libraries.'
 
 - script: |
     rustc -Vv
     cargo -V
   displayName: Query rust and cargo versions
 
-- script : cargo build --features "sqlite"
-  displayName: 'Build project with sqlite backend'
-
-- script : cargo build --features "mysql"
-  displayName: 'Build project with mysql backend'
+- script : cargo test --features "sqlite,mysql,postgresql"
+  displayName: 'Test project with sqlite, mysql and postgresql backends'
25  build.rs
@@ -1,17 +1,24 @@
 use std::process::Command;
+use std::env;
 
 fn main() {
-    #[cfg(all(feature = "sqlite", feature = "mysql"))]
-    compile_error!("Can't enable both sqlite and mysql at the same time");
-    #[cfg(all(feature = "sqlite", feature = "postgresql"))]
-    compile_error!("Can't enable both sqlite and postgresql at the same time");
-    #[cfg(all(feature = "mysql", feature = "postgresql"))]
-    compile_error!("Can't enable both mysql and postgresql at the same time");
+    // This allow using #[cfg(sqlite)] instead of #[cfg(feature = "sqlite")], which helps when trying to add them through macros
+    #[cfg(feature = "sqlite")]
+    println!("cargo:rustc-cfg=sqlite");
+    #[cfg(feature = "mysql")]
+    println!("cargo:rustc-cfg=mysql");
+    #[cfg(feature = "postgresql")]
+    println!("cargo:rustc-cfg=postgresql");
 
     #[cfg(not(any(feature = "sqlite", feature = "mysql", feature = "postgresql")))]
     compile_error!("You need to enable one DB backend. To build with previous defaults do: cargo build --features sqlite");
 
-    read_git_info().ok();
+    if let Ok(version) = env::var("BWRS_VERSION") {
+        println!("cargo:rustc-env=BWRS_VERSION={}", version);
+        println!("cargo:rustc-env=CARGO_PKG_VERSION={}", version);
+    } else {
+        read_git_info().ok();
+    }
 }
 
 fn run(args: &[&str]) -> Result<String, std::io::Error> {
@@ -54,14 +61,16 @@ fn read_git_info() -> Result<(), std::io::Error> {
     } else {
         format!("{}-{}", last_tag, rev_short)
     };
-    println!("cargo:rustc-env=GIT_VERSION={}", version);
+    println!("cargo:rustc-env=BWRS_VERSION={}", version);
+    println!("cargo:rustc-env=CARGO_PKG_VERSION={}", version);
 
     // To access these values, use:
     // env!("GIT_EXACT_TAG")
     // env!("GIT_LAST_TAG")
     // env!("GIT_BRANCH")
     // env!("GIT_REV")
-    // env!("GIT_VERSION")
+    // env!("BWRS_VERSION")
 
     Ok(())
 }
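The new build.rs above emits cargo:rustc-cfg flags so that code can use #[cfg(sqlite)] instead of #[cfg(feature = "sqlite")]. A small sketch of what that enables downstream (the module and function names are hypothetical, not taken from the source tree):

// build.rs prints e.g. `cargo:rustc-cfg=sqlite`, so the short cfg names are
// visible throughout the crate without spelling out `feature = "..."`.

#[cfg(sqlite)]
mod sqlite_backend {
    pub const NAME: &str = "sqlite";
}

#[cfg(postgresql)]
mod postgresql_backend {
    pub const NAME: &str = "postgresql";
}

// The shorter names are easier to generate from macros, which is the
// motivation given in the build.rs comment.
fn enabled_backends() -> Vec<&'static str> {
    let mut backends = Vec::new();
    #[cfg(sqlite)]
    backends.push(sqlite_backend::NAME);
    #[cfg(postgresql)]
    backends.push(postgresql_backend::NAME);
    backends
}

fn main() {
    println!("compiled-in backends: {:?}", enabled_backends());
}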
33  docker/Dockerfile.buildx (new file)
@@ -0,0 +1,33 @@
|
|||||||
|
# The cross-built images have the build arch (`amd64`) embedded in the image
|
||||||
|
# manifest, rather than the target arch. For example:
|
||||||
|
#
|
||||||
|
# $ docker inspect bitwardenrs/server:latest-armv7 | jq -r '.[]|.Architecture'
|
||||||
|
# amd64
|
||||||
|
#
|
||||||
|
# Recent versions of Docker have started printing a warning when the image's
|
||||||
|
# claimed arch doesn't match the host arch. For example:
|
||||||
|
#
|
||||||
|
# WARNING: The requested image's platform (linux/amd64) does not match the
|
||||||
|
# detected host platform (linux/arm/v7) and no specific platform was requested
|
||||||
|
#
|
||||||
|
# The image still works fine, but the spurious warning creates confusion.
|
||||||
|
#
|
||||||
|
# Docker doesn't seem to provide a way to directly set the arch of an image
|
||||||
|
# at build time. To resolve the build vs. target arch discrepancy, we use
|
||||||
|
# Docker Buildx to build a new set of images with the correct target arch.
|
||||||
|
#
|
||||||
|
# Docker Buildx uses this Dockerfile to build an image for each requested
|
||||||
|
# platform. Since the Dockerfile basically consists of a single `FROM`
|
||||||
|
# instruction, we're effectively telling Buildx to build a platform-specific
|
||||||
|
# image by simply copying the existing cross-built image and setting the
|
||||||
|
# correct target arch as a side effect.
|
||||||
|
#
|
||||||
|
# References:
|
||||||
|
#
|
||||||
|
# - https://docs.docker.com/buildx/working-with-buildx/#build-multi-platform-images
|
||||||
|
# - https://docs.docker.com/engine/reference/builder/#automatic-platform-args-in-the-global-scope
|
||||||
|
# - https://docs.docker.com/engine/reference/builder/#understand-how-arg-and-from-interact
|
||||||
|
#
|
||||||
|
ARG LOCAL_REPO
|
||||||
|
ARG DOCKER_TAG
|
||||||
|
FROM ${LOCAL_REPO}:${DOCKER_TAG}-${TARGETARCH}${TARGETVARIANT}
|
269  docker/Dockerfile.j2 (new file)
@@ -0,0 +1,269 @@
# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.

{% set build_stage_base_image = "rust:1.50" %}
{% if "alpine" in target_file %}
{% if "amd64" in target_file %}
{% set build_stage_base_image = "clux/muslrust:nightly-2021-02-22" %}
{% set runtime_stage_base_image = "alpine:3.13" %}
{% set package_arch_target = "x86_64-unknown-linux-musl" %}
{% elif "armv7" in target_file %}
{% set build_stage_base_image = "messense/rust-musl-cross:armv7-musleabihf" %}
{% set runtime_stage_base_image = "balenalib/armv7hf-alpine:3.13" %}
{% set package_arch_target = "armv7-unknown-linux-musleabihf" %}
{% endif %}
{% elif "amd64" in target_file %}
{% set runtime_stage_base_image = "debian:buster-slim" %}
{% elif "arm64" in target_file %}
{% set runtime_stage_base_image = "balenalib/aarch64-debian:buster" %}
{% set package_arch_name = "arm64" %}
{% set package_arch_target = "aarch64-unknown-linux-gnu" %}
{% set package_cross_compiler = "aarch64-linux-gnu" %}
{% elif "armv6" in target_file %}
{% set runtime_stage_base_image = "balenalib/rpi-debian:buster" %}
{% set package_arch_name = "armel" %}
{% set package_arch_target = "arm-unknown-linux-gnueabi" %}
{% set package_cross_compiler = "arm-linux-gnueabi" %}
{% elif "armv7" in target_file %}
{% set runtime_stage_base_image = "balenalib/armv7hf-debian:buster" %}
{% set package_arch_name = "armhf" %}
{% set package_arch_target = "armv7-unknown-linux-gnueabihf" %}
{% set package_cross_compiler = "arm-linux-gnueabihf" %}
{% endif %}
{% if package_arch_name is defined %}
{% set package_arch_prefix = ":" + package_arch_name %}
{% else %}
{% set package_arch_prefix = "" %}
{% endif %}
{% if package_arch_target is defined %}
{% set package_arch_target_param = " --target=" + package_arch_target %}
{% else %}
{% set package_arch_target_param = "" %}
{% endif %}
# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
{% set vault_version = "2.19.0" %}
{% set vault_image_digest = "sha256:8747cfaa2c6d87d1749e119dd884697e8099389aa9aca30a4d73d4ff796fe0e4" %}
# The web-vault digest specifies a particular web-vault build on Docker Hub.
# Using the digest instead of the tag name provides better security,
# as the digest of an image is immutable, whereas a tag name can later
# be changed to point to a malicious image.
#
# To verify the current digest for a given tag name:
# - From https://hub.docker.com/r/bitwardenrs/web-vault/tags,
# click the tag name to view the digest of the image it currently points to.
# - From the command line:
# $ docker pull bitwardenrs/web-vault:v{{ vault_version }}
# $ docker image inspect --format "{{ '{{' }}.RepoDigests}}" bitwardenrs/web-vault:v{{ vault_version }}
# [bitwardenrs/web-vault@{{ vault_image_digest }}]
#
# - Conversely, to get the tag name from the digest:
# $ docker image inspect --format "{{ '{{' }}.RepoTags}}" bitwardenrs/web-vault@{{ vault_image_digest }}
# [bitwardenrs/web-vault:v{{ vault_version }}]
#
FROM bitwardenrs/web-vault@{{ vault_image_digest }} as vault

########################## BUILD IMAGE ##########################
FROM {{ build_stage_base_image }} as build

{% if "alpine" in target_file %}
{% if "amd64" in target_file %}
# Alpine-based AMD64 (musl) does not support mysql/mariadb during compile time.
ARG DB=sqlite,postgresql
{% set features = "sqlite,postgresql" %}
{% else %}
# Alpine-based ARM (musl) only supports sqlite during compile time.
ARG DB=sqlite
{% set features = "sqlite" %}
{% endif %}
{% else %}
# Debian-based builds support multidb
ARG DB=sqlite,mysql,postgresql
{% set features = "sqlite,mysql,postgresql" %}
{% endif %}

# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive LANG=C.UTF-8 TZ=UTC TERM=xterm-256color

# Don't download rust docs
RUN rustup set profile minimal

{% if "alpine" in target_file %}
ENV USER "root"
ENV RUSTFLAGS='-C link-arg=-s'
{% if "armv7" in target_file %}
ENV CFLAGS_armv7_unknown_linux_musleabihf="-mfpu=vfpv3-d16"
{% endif %}
{% elif "arm" in target_file %}
# Install required build libs for {{ package_arch_name }} architecture.
# To compile both mysql and postgresql we need some extra packages for both host arch and target arch
RUN sed 's/^deb/deb-src/' /etc/apt/sources.list > \
    /etc/apt/sources.list.d/deb-src.list \
    && dpkg --add-architecture {{ package_arch_name }} \
    && apt-get update \
    && apt-get install -y \
        --no-install-recommends \
        libssl-dev{{ package_arch_prefix }} \
        libc6-dev{{ package_arch_prefix }} \
        libpq5{{ package_arch_prefix }} \
        libpq-dev \
        libmariadb-dev{{ package_arch_prefix }} \
        libmariadb-dev-compat{{ package_arch_prefix }}

RUN apt-get update \
    && apt-get install -y \
        --no-install-recommends \
        gcc-{{ package_cross_compiler }} \
    && mkdir -p ~/.cargo \
    && echo '[target.{{ package_arch_target }}]' >> ~/.cargo/config \
    && echo 'linker = "{{ package_cross_compiler }}-gcc"' >> ~/.cargo/config \
    && echo 'rustflags = ["-L/usr/lib/{{ package_cross_compiler }}"]' >> ~/.cargo/config

ENV CARGO_HOME "/root/.cargo"
ENV USER "root"
{% endif -%}

{% if "amd64" in target_file and "alpine" not in target_file %}
# Install DB packages
RUN apt-get update && apt-get install -y \
    --no-install-recommends \
    libmariadb-dev{{ package_arch_prefix }} \
    libpq-dev{{ package_arch_prefix }} \
    && rm -rf /var/lib/apt/lists/*
{% endif %}

# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
WORKDIR /app

# Copies over *only* your manifests and build files
COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs

{% if "alpine" not in target_file %}
{% if "arm" in target_file %}
# NOTE: This should be the last apt-get/dpkg for this stage, since after this it will fail because of broken dependencies.
# For Diesel-RS migrations_macros to compile with MySQL/MariaDB we need to do some magic.
# We at least need libmariadb3:amd64 installed for the x86_64 version of libmariadb.so (client)
# We also need the libmariadb-dev-compat:amd64 but it can not be installed together with the {{ package_arch_prefix }} version.
# What we can do is a force install, because nothing important is overlapping each other.
RUN apt-get install -y --no-install-recommends libmariadb3:amd64 && \
    apt-get download libmariadb-dev-compat:amd64 && \
    dpkg --force-all -i ./libmariadb-dev-compat*.deb && \
    rm -rvf ./libmariadb-dev-compat*.deb

# For Diesel-RS migrations_macros to compile with PostgreSQL we need to do some magic.
# The libpq5{{ package_arch_prefix }} package seems to not provide a symlink to libpq.so.5 with the name libpq.so.
# This is only provided by the libpq-dev package which can't be installed for both arch at the same time.
# Without this specific file the ld command will fail and compilation fails with it.
RUN ln -sfnr /usr/lib/{{ package_cross_compiler }}/libpq.so.5 /usr/lib/{{ package_cross_compiler }}/libpq.so

ENV CC_{{ package_arch_target | replace("-", "_") }}="/usr/bin/{{ package_cross_compiler }}-gcc"
ENV CROSS_COMPILE="1"
ENV OPENSSL_INCLUDE_DIR="/usr/include/{{ package_cross_compiler }}"
ENV OPENSSL_LIB_DIR="/usr/lib/{{ package_cross_compiler }}"
{% endif -%}
{% endif %}
{% if package_arch_target is defined %}
RUN rustup target add {{ package_arch_target }}
{% endif %}

# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN cargo build --features ${DB} --release{{ package_arch_target_param }}
RUN find . -not -path "./target*" -delete

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .

# Make sure that we actually build the project
RUN touch src/main.rs

# Builds again, this time it'll just be
# your actual source files being built
RUN cargo build --features ${DB} --release{{ package_arch_target_param }}
{% if "alpine" in target_file %}
{% if "armv7" in target_file %}
RUN musl-strip target/{{ package_arch_target }}/release/bitwarden_rs
{% endif %}
{% endif %}

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM {{ runtime_stage_base_image }}

ENV ROCKET_ENV "staging"
ENV ROCKET_PORT=80
ENV ROCKET_WORKERS=10
{% if "alpine" in runtime_stage_base_image %}
ENV SSL_CERT_DIR=/etc/ssl/certs
{% endif %}

{% if "amd64" not in target_file %}
RUN [ "cross-build-start" ]

{% endif %}
# Install needed libraries
{% if "alpine" in runtime_stage_base_image %}
RUN apk add --no-cache \
    openssl \
    curl \
    dumb-init \
{% if "sqlite" in features %}
    sqlite \
{% endif %}
{% if "mysql" in features %}
    mariadb-connector-c \
{% endif %}
{% if "postgresql" in features %}
    postgresql-libs \
{% endif %}
    ca-certificates
{% else %}
RUN apt-get update && apt-get install -y \
    --no-install-recommends \
    openssl \
    ca-certificates \
    curl \
    dumb-init \
    sqlite3 \
    libmariadb-dev-compat \
    libpq5 \
    && rm -rf /var/lib/apt/lists/*
{% endif %}

RUN mkdir /data
{% if "amd64" not in target_file %}

RUN [ "cross-build-end" ]

{% endif %}
VOLUME /data
EXPOSE 80
EXPOSE 3012

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
COPY Rocket.toml .
COPY --from=vault /web-vault ./web-vault
{% if package_arch_target is defined %}
COPY --from=build /app/target/{{ package_arch_target }}/release/bitwarden_rs .
{% else %}
COPY --from=build /app/target/release/bitwarden_rs .
{% endif %}

COPY docker/healthcheck.sh /healthcheck.sh
COPY docker/start.sh /start.sh

HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

# Configures the startup!
WORKDIR /
ENTRYPOINT ["/usr/bin/dumb-init", "--"]
CMD ["/start.sh"]
9 docker/Makefile Normal file
@@ -0,0 +1,9 @@
OBJECTS := $(shell find ./ -mindepth 2 -name 'Dockerfile*')

all: $(OBJECTS)

%/Dockerfile: Dockerfile.j2 render_template
	./render_template "$<" "{\"target_file\":\"$@\"}" > "$@"

%/Dockerfile.alpine: Dockerfile.j2 render_template
	./render_template "$<" "{\"target_file\":\"$@\"}" > "$@"
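The Makefile above drives the template rendering: each docker/<arch>/Dockerfile* is rebuilt from Dockerfile.j2 by the render_template helper, which receives the template path and a JSON object whose target_file key selects the per-arch branches inside the template. A minimal usage sketch (the helper script itself is not part of this excerpt and is assumed to be executable in the docker/ directory):

    $ cd docker
    $ make     # re-renders every docker/<arch>/Dockerfile and Dockerfile.alpine from Dockerfile.j2
    $ ./render_template Dockerfile.j2 '{"target_file": "amd64/Dockerfile.alpine"}' > amd64/Dockerfile.alpine

The last command is exactly what the %/Dockerfile.alpine rule expands to for a single target.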
3 docker/README.md Normal file
@@ -0,0 +1,3 @@
The arch-specific directory names follow the arch identifiers used by the Docker official images:

https://github.com/docker-library/official-images/blob/master/README.md#architectures-other-than-amd64
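In this change that convention maps to directories such as the following (taken from the files added or modified here):

    docker/amd64/Dockerfile
    docker/amd64/Dockerfile.alpine
    docker/arm64/Dockerfile
    docker/armv6/Dockerfile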
@@ -1,110 +0,0 @@
# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
FROM alpine:3.10 as vault

ENV VAULT_VERSION "v2.12.0b"

ENV URL "https://github.com/dani-garcia/bw_web_builds/releases/download/$VAULT_VERSION/bw_web_$VAULT_VERSION.tar.gz"

RUN apk add --no-cache --upgrade \
    curl \
    tar

RUN mkdir /web-vault
WORKDIR /web-vault

SHELL ["/bin/ash", "-eo", "pipefail", "-c"]

RUN curl -L $URL | tar xz
RUN ls

########################## BUILD IMAGE ##########################
# We need to use the Rust build image, because
# we need the Rust compiler and Cargo tooling
FROM rust:1.39 as build

# set mysql backend
ARG DB=mysql

# Don't download rust docs
RUN rustup set profile minimal

RUN apt-get update \
    && apt-get install -y \
        --no-install-recommends \
        gcc-aarch64-linux-gnu \
    && mkdir -p ~/.cargo \
    && echo '[target.aarch64-unknown-linux-gnu]' >> ~/.cargo/config \
    && echo 'linker = "aarch64-linux-gnu-gcc"' >> ~/.cargo/config

ENV CARGO_HOME "/root/.cargo"
ENV USER "root"

WORKDIR /app

# Prepare openssl arm64 libs
RUN sed 's/^deb/deb-src/' /etc/apt/sources.list > \
    /etc/apt/sources.list.d/deb-src.list \
    && dpkg --add-architecture arm64 \
    && apt-get update \
    && apt-get install -y \
        --no-install-recommends \
        libssl-dev:arm64 \
        libc6-dev:arm64 \
        libmariadb-dev:arm64

ENV CC_aarch64_unknown_linux_gnu="/usr/bin/aarch64-linux-gnu-gcc"
ENV CROSS_COMPILE="1"
ENV OPENSSL_INCLUDE_DIR="/usr/include/aarch64-linux-gnu"
ENV OPENSSL_LIB_DIR="/usr/lib/aarch64-linux-gnu"

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .

# Build
RUN rustup target add aarch64-unknown-linux-gnu
RUN cargo build --features ${DB} --release --target=aarch64-unknown-linux-gnu

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM balenalib/aarch64-debian:buster

ENV ROCKET_ENV "staging"
ENV ROCKET_PORT=80
ENV ROCKET_WORKERS=10

RUN [ "cross-build-start" ]

# Install needed libraries
RUN apt-get update && apt-get install -y \
    --no-install-recommends \
    openssl \
    ca-certificates \
    curl \
    libmariadbclient-dev \
    && rm -rf /var/lib/apt/lists/*

RUN mkdir /data

RUN [ "cross-build-end" ]

VOLUME /data
EXPOSE 80

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
COPY Rocket.toml .
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/aarch64-unknown-linux-gnu/release/bitwarden_rs .

COPY docker/healthcheck.sh ./healthcheck.sh

HEALTHCHECK --interval=30s --timeout=3s CMD sh healthcheck.sh || exit 1

# Configures the startup!
WORKDIR /
CMD ["/bitwarden_rs"]
@@ -1,109 +0,0 @@
# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
FROM alpine:3.10 as vault

ENV VAULT_VERSION "v2.12.0b"

ENV URL "https://github.com/dani-garcia/bw_web_builds/releases/download/$VAULT_VERSION/bw_web_$VAULT_VERSION.tar.gz"

RUN apk add --no-cache --upgrade \
    curl \
    tar

RUN mkdir /web-vault
WORKDIR /web-vault

SHELL ["/bin/ash", "-eo", "pipefail", "-c"]

RUN curl -L $URL | tar xz
RUN ls

########################## BUILD IMAGE ##########################
# We need to use the Rust build image, because
# we need the Rust compiler and Cargo tooling
FROM rust:1.39 as build

# set sqlite as default for DB ARG for backward comaptibility
ARG DB=sqlite

# Don't download rust docs
RUN rustup set profile minimal

RUN apt-get update \
    && apt-get install -y \
        --no-install-recommends \
        gcc-aarch64-linux-gnu \
    && mkdir -p ~/.cargo \
    && echo '[target.aarch64-unknown-linux-gnu]' >> ~/.cargo/config \
    && echo 'linker = "aarch64-linux-gnu-gcc"' >> ~/.cargo/config

ENV CARGO_HOME "/root/.cargo"
ENV USER "root"

WORKDIR /app

# Prepare openssl arm64 libs
RUN sed 's/^deb/deb-src/' /etc/apt/sources.list > \
    /etc/apt/sources.list.d/deb-src.list \
    && dpkg --add-architecture arm64 \
    && apt-get update \
    && apt-get install -y \
        --no-install-recommends \
        libssl-dev:arm64 \
        libc6-dev:arm64

ENV CC_aarch64_unknown_linux_gnu="/usr/bin/aarch64-linux-gnu-gcc"
ENV CROSS_COMPILE="1"
ENV OPENSSL_INCLUDE_DIR="/usr/include/aarch64-linux-gnu"
ENV OPENSSL_LIB_DIR="/usr/lib/aarch64-linux-gnu"

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .

# Build
RUN rustup target add aarch64-unknown-linux-gnu
RUN cargo build --features ${DB} --release --target=aarch64-unknown-linux-gnu

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM balenalib/aarch64-debian:buster

ENV ROCKET_ENV "staging"
ENV ROCKET_PORT=80
ENV ROCKET_WORKERS=10

RUN [ "cross-build-start" ]

# Install needed libraries
RUN apt-get update && apt-get install -y \
    --no-install-recommends \
    openssl \
    ca-certificates \
    curl \
    sqlite3 \
    && rm -rf /var/lib/apt/lists/*

RUN mkdir /data

RUN [ "cross-build-end" ]

VOLUME /data
EXPOSE 80

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
COPY Rocket.toml .
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/aarch64-unknown-linux-gnu/release/bitwarden_rs .

COPY docker/healthcheck.sh ./healthcheck.sh

HEALTHCHECK --interval=30s --timeout=3s CMD sh healthcheck.sh || exit 1

# Configures the startup!
WORKDIR /
CMD ["/bitwarden_rs"]
@@ -1,50 +1,50 @@
+# This file was generated using a Jinja2 template.
+# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.
+
 # Using multistage build:
 # https://docs.docker.com/develop/develop-images/multistage-build/
 # https://whitfin.io/speeding-up-rust-docker-builds/
 ####################### VAULT BUILD IMAGE #######################
-FROM alpine:3.10 as vault
-
-ENV VAULT_VERSION "v2.12.0b"
-
-ENV URL "https://github.com/dani-garcia/bw_web_builds/releases/download/$VAULT_VERSION/bw_web_$VAULT_VERSION.tar.gz"
-
-RUN apk add --no-cache --upgrade \
-    curl \
-    tar
-
-RUN mkdir /web-vault
-WORKDIR /web-vault
-
-SHELL ["/bin/ash", "-eo", "pipefail", "-c"]
-
-RUN curl -L $URL | tar xz
-RUN ls
+# The web-vault digest specifies a particular web-vault build on Docker Hub.
+# Using the digest instead of the tag name provides better security,
+# as the digest of an image is immutable, whereas a tag name can later
+# be changed to point to a malicious image.
+#
+# To verify the current digest for a given tag name:
+# - From https://hub.docker.com/r/bitwardenrs/web-vault/tags,
+# click the tag name to view the digest of the image it currently points to.
+# - From the command line:
+# $ docker pull bitwardenrs/web-vault:v2.19.0
+# $ docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.19.0
+# [bitwardenrs/web-vault@sha256:8747cfaa2c6d87d1749e119dd884697e8099389aa9aca30a4d73d4ff796fe0e4]
+#
+# - Conversely, to get the tag name from the digest:
+# $ docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:8747cfaa2c6d87d1749e119dd884697e8099389aa9aca30a4d73d4ff796fe0e4
+# [bitwardenrs/web-vault:v2.19.0]
+#
+FROM bitwardenrs/web-vault@sha256:8747cfaa2c6d87d1749e119dd884697e8099389aa9aca30a4d73d4ff796fe0e4 as vault
 
 ########################## BUILD IMAGE ##########################
-# We need to use the Rust build image, because
-# we need the Rust compiler and Cargo tooling
-FROM rust:1.39 as build
+FROM rust:1.50 as build
 
-# set mysql backend
-ARG DB=postgresql
+# Debian-based builds support multidb
+ARG DB=sqlite,mysql,postgresql
 
+# Build time options to avoid dpkg warnings and help with reproducible builds.
+ENV DEBIAN_FRONTEND=noninteractive LANG=C.UTF-8 TZ=UTC TERM=xterm-256color
+
 # Don't download rust docs
 RUN rustup set profile minimal
 
-# Using bundled SQLite, no need to install it
-# RUN apt-get update && apt-get install -y\
-#     --no-install-recommends \
-#     sqlite3\
-#     && rm -rf /var/lib/apt/lists/*
-
-# Install MySQL package
+# Install DB packages
 RUN apt-get update && apt-get install -y \
     --no-install-recommends \
+    libmariadb-dev \
     libpq-dev \
     && rm -rf /var/lib/apt/lists/*
 
 # Creates a dummy project used to grab dependencies
-RUN USER=root cargo new --bin app
+RUN USER=root cargo new --bin /app
 WORKDIR /app
 
 # Copies over *only* your manifests and build files
@@ -52,6 +52,7 @@ COPY ./Cargo.* ./
 COPY ./rust-toolchain ./rust-toolchain
 COPY ./build.rs ./build.rs
 
+
 # Builds your dependencies and removes the
 # dummy project, except the target folder
 # This folder contains the compiled dependencies
@@ -84,7 +85,9 @@ RUN apt-get update && apt-get install -y \
     openssl \
     ca-certificates \
     curl \
+    dumb-init \
     sqlite3 \
+    libmariadb-dev-compat \
     libpq5 \
     && rm -rf /var/lib/apt/lists/*
 
@@ -97,12 +100,14 @@ EXPOSE 3012
 # and the binary from the "build" stage to the current stage
 COPY Rocket.toml .
 COPY --from=vault /web-vault ./web-vault
-COPY --from=build app/target/release/bitwarden_rs .
+COPY --from=build /app/target/release/bitwarden_rs .
 
-COPY docker/healthcheck.sh ./healthcheck.sh
+COPY docker/healthcheck.sh /healthcheck.sh
+COPY docker/start.sh /start.sh
 
-HEALTHCHECK --interval=30s --timeout=3s CMD sh healthcheck.sh || exit 1
+HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]
 
 # Configures the startup!
 WORKDIR /
-CMD ["/bitwarden_rs"]
+ENTRYPOINT ["/usr/bin/dumb-init", "--"]
+CMD ["/start.sh"]
108 docker/amd64/Dockerfile.alpine Normal file
@@ -0,0 +1,108 @@
# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.

# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
# The web-vault digest specifies a particular web-vault build on Docker Hub.
# Using the digest instead of the tag name provides better security,
# as the digest of an image is immutable, whereas a tag name can later
# be changed to point to a malicious image.
#
# To verify the current digest for a given tag name:
# - From https://hub.docker.com/r/bitwardenrs/web-vault/tags,
# click the tag name to view the digest of the image it currently points to.
# - From the command line:
# $ docker pull bitwardenrs/web-vault:v2.19.0
# $ docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.19.0
# [bitwardenrs/web-vault@sha256:8747cfaa2c6d87d1749e119dd884697e8099389aa9aca30a4d73d4ff796fe0e4]
#
# - Conversely, to get the tag name from the digest:
# $ docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:8747cfaa2c6d87d1749e119dd884697e8099389aa9aca30a4d73d4ff796fe0e4
# [bitwardenrs/web-vault:v2.19.0]
#
FROM bitwardenrs/web-vault@sha256:8747cfaa2c6d87d1749e119dd884697e8099389aa9aca30a4d73d4ff796fe0e4 as vault

########################## BUILD IMAGE ##########################
FROM clux/muslrust:nightly-2021-02-22 as build

# Alpine-based AMD64 (musl) does not support mysql/mariadb during compile time.
ARG DB=sqlite,postgresql

# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive LANG=C.UTF-8 TZ=UTC TERM=xterm-256color

# Don't download rust docs
RUN rustup set profile minimal

ENV USER "root"
ENV RUSTFLAGS='-C link-arg=-s'

# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
WORKDIR /app

# Copies over *only* your manifests and build files
COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs

RUN rustup target add x86_64-unknown-linux-musl

# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN cargo build --features ${DB} --release --target=x86_64-unknown-linux-musl
RUN find . -not -path "./target*" -delete

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .

# Make sure that we actually build the project
RUN touch src/main.rs

# Builds again, this time it'll just be
# your actual source files being built
RUN cargo build --features ${DB} --release --target=x86_64-unknown-linux-musl

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM alpine:3.13

ENV ROCKET_ENV "staging"
ENV ROCKET_PORT=80
ENV ROCKET_WORKERS=10
ENV SSL_CERT_DIR=/etc/ssl/certs

# Install needed libraries
RUN apk add --no-cache \
    openssl \
    curl \
    dumb-init \
    sqlite \
    postgresql-libs \
    ca-certificates

RUN mkdir /data
VOLUME /data
EXPOSE 80
EXPOSE 3012

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
COPY Rocket.toml .
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/x86_64-unknown-linux-musl/release/bitwarden_rs .

COPY docker/healthcheck.sh /healthcheck.sh
COPY docker/start.sh /start.sh

HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

# Configures the startup!
WORKDIR /
ENTRYPOINT ["/usr/bin/dumb-init", "--"]
CMD ["/start.sh"]
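As a usage sketch for the generated file above (the image tag is an arbitrary example; the build context must be the repository root, since the Dockerfile copies Cargo.*, docker/healthcheck.sh and docker/start.sh from the context):

    $ docker build -t bitwarden_rs:alpine-local -f docker/amd64/Dockerfile.alpine .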
@@ -1,101 +0,0 @@
# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
FROM alpine:3.10 as vault

ENV VAULT_VERSION "v2.12.0b"

ENV URL "https://github.com/dani-garcia/bw_web_builds/releases/download/$VAULT_VERSION/bw_web_$VAULT_VERSION.tar.gz"

RUN apk add --no-cache --upgrade \
    curl \
    tar

RUN mkdir /web-vault
WORKDIR /web-vault

SHELL ["/bin/ash", "-eo", "pipefail", "-c"]

RUN curl -L $URL | tar xz
RUN ls

########################## BUILD IMAGE ##########################
# We need to use the Rust build image, because
# we need the Rust compiler and Cargo tooling
FROM rust:1.39 as build

# set mysql backend
ARG DB=mysql

# Don't download rust docs
RUN rustup set profile minimal

# Install MySQL package
RUN apt-get update && apt-get install -y \
    --no-install-recommends \
    libmariadb-dev \
    && rm -rf /var/lib/apt/lists/*

# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin app
WORKDIR /app

# Copies over *only* your manifests and build files
COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs

# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN cargo build --features ${DB} --release
RUN find . -not -path "./target*" -delete

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .

# Make sure that we actually build the project
RUN touch src/main.rs

# Builds again, this time it'll just be
# your actual source files being built
RUN cargo build --features ${DB} --release

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM debian:buster-slim

ENV ROCKET_ENV "staging"
ENV ROCKET_PORT=80
ENV ROCKET_WORKERS=10

# Install needed libraries
RUN apt-get update && apt-get install -y \
    --no-install-recommends \
    openssl \
    ca-certificates \
    curl \
    libmariadbclient-dev \
    && rm -rf /var/lib/apt/lists/*

RUN mkdir /data
VOLUME /data
EXPOSE 80
EXPOSE 3012

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
COPY Rocket.toml .
COPY --from=vault /web-vault ./web-vault
COPY --from=build app/target/release/bitwarden_rs .

COPY docker/healthcheck.sh ./healthcheck.sh

HEALTHCHECK --interval=30s --timeout=3s CMD sh healthcheck.sh || exit 1

# Configures the startup!
WORKDIR /
CMD ["/bitwarden_rs"]
@@ -1,89 +0,0 @@
# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
FROM alpine:3.10 as vault

ENV VAULT_VERSION "v2.12.0b"

ENV URL "https://github.com/dani-garcia/bw_web_builds/releases/download/$VAULT_VERSION/bw_web_$VAULT_VERSION.tar.gz"

RUN apk add --no-cache --upgrade \
    curl \
    tar

RUN mkdir /web-vault
WORKDIR /web-vault

SHELL ["/bin/ash", "-eo", "pipefail", "-c"]

RUN curl -L $URL | tar xz
RUN ls

########################## BUILD IMAGE ##########################
# Musl build image for statically compiled binary
FROM clux/muslrust:nightly-2019-11-23 as build

# set mysql backend
ARG DB=mysql

# Don't download rust docs
RUN rustup set profile minimal

ENV USER "root"

# Install needed libraries
RUN apt-get update && apt-get install -y \
    --no-install-recommends \
    libmysqlclient-dev \
    && rm -rf /var/lib/apt/lists/*

WORKDIR /app

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .

RUN rustup target add x86_64-unknown-linux-musl

# Make sure that we actually build the project
RUN touch src/main.rs

# Build
RUN cargo build --features ${DB} --release

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM alpine:3.10

ENV ROCKET_ENV "staging"
ENV ROCKET_PORT=80
ENV ROCKET_WORKERS=10
ENV SSL_CERT_DIR=/etc/ssl/certs

# Install needed libraries
RUN apk add --no-cache \
    openssl \
    mariadb-connector-c \
    curl \
    ca-certificates

RUN mkdir /data
VOLUME /data
EXPOSE 80
EXPOSE 3012

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
COPY Rocket.toml .
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/x86_64-unknown-linux-musl/release/bitwarden_rs .

COPY docker/healthcheck.sh ./healthcheck.sh

HEALTHCHECK --interval=30s --timeout=3s CMD sh healthcheck.sh || exit 1

# Configures the startup!
WORKDIR /
CMD ["/bitwarden_rs"]
@@ -1,90 +0,0 @@
# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
FROM alpine:3.10 as vault

ENV VAULT_VERSION "v2.12.0b"

ENV URL "https://github.com/dani-garcia/bw_web_builds/releases/download/$VAULT_VERSION/bw_web_$VAULT_VERSION.tar.gz"

RUN apk add --no-cache --upgrade \
    curl \
    tar

RUN mkdir /web-vault
WORKDIR /web-vault

SHELL ["/bin/ash", "-eo", "pipefail", "-c"]

RUN curl -L $URL | tar xz
RUN ls

########################## BUILD IMAGE ##########################
# Musl build image for statically compiled binary
FROM clux/muslrust:nightly-2019-11-23 as build

# set mysql backend
ARG DB=postgresql

# Don't download rust docs
RUN rustup set profile minimal

ENV USER "root"

# Install needed libraries
RUN apt-get update && apt-get install -y \
    --no-install-recommends \
    libpq-dev \
    && rm -rf /var/lib/apt/lists/*

WORKDIR /app

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .

RUN rustup target add x86_64-unknown-linux-musl

# Make sure that we actually build the project
RUN touch src/main.rs

# Build
RUN cargo build --features ${DB} --release

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM alpine:3.10

ENV ROCKET_ENV "staging"
ENV ROCKET_PORT=80
ENV ROCKET_WORKERS=10
ENV SSL_CERT_DIR=/etc/ssl/certs

# Install needed libraries
RUN apk add --no-cache \
    openssl \
    postgresql-libs \
    curl \
    sqlite \
    ca-certificates

RUN mkdir /data
VOLUME /data
EXPOSE 80
EXPOSE 3012

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
COPY Rocket.toml .
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/x86_64-unknown-linux-musl/release/bitwarden_rs .

COPY docker/healthcheck.sh ./healthcheck.sh

HEALTHCHECK --interval=30s --timeout=3s CMD sh healthcheck.sh || exit 1

# Configures the startup!
WORKDIR /
CMD ["/bitwarden_rs"]
@@ -1,95 +0,0 @@
# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
FROM alpine:3.10 as vault

ENV VAULT_VERSION "v2.12.0b"

ENV URL "https://github.com/dani-garcia/bw_web_builds/releases/download/$VAULT_VERSION/bw_web_$VAULT_VERSION.tar.gz"

RUN apk add --no-cache --upgrade \
    curl \
    tar

RUN mkdir /web-vault
WORKDIR /web-vault

SHELL ["/bin/ash", "-eo", "pipefail", "-c"]

RUN curl -L $URL | tar xz
RUN ls

########################## BUILD IMAGE ##########################
# We need to use the Rust build image, because
# we need the Rust compiler and Cargo tooling
FROM rust:1.39 as build

# set sqlite as default for DB ARG for backward comaptibility
ARG DB=sqlite

# Don't download rust docs
RUN rustup set profile minimal

# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin app
WORKDIR /app

# Copies over *only* your manifests and build files
COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs

# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN cargo build --features ${DB} --release
RUN find . -not -path "./target*" -delete

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .

# Make sure that we actually build the project
RUN touch src/main.rs

# Builds again, this time it'll just be
# your actual source files being built
RUN cargo build --features ${DB} --release

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM debian:buster-slim

ENV ROCKET_ENV "staging"
ENV ROCKET_PORT=80
ENV ROCKET_WORKERS=10

# Install needed libraries
RUN apt-get update && apt-get install -y \
    --no-install-recommends \
    openssl \
    ca-certificates \
    curl \
    sqlite3 \
    && rm -rf /var/lib/apt/lists/*

RUN mkdir /data
VOLUME /data
EXPOSE 80
EXPOSE 3012

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
COPY Rocket.toml .
COPY --from=vault /web-vault ./web-vault
COPY --from=build app/target/release/bitwarden_rs .

COPY docker/healthcheck.sh ./healthcheck.sh

HEALTHCHECK --interval=30s --timeout=3s CMD sh healthcheck.sh || exit 1

# Configures the startup!
WORKDIR /
CMD ["/bitwarden_rs"]
@@ -1,84 +0,0 @@
# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
FROM alpine:3.10 as vault

ENV VAULT_VERSION "v2.12.0b"

ENV URL "https://github.com/dani-garcia/bw_web_builds/releases/download/$VAULT_VERSION/bw_web_$VAULT_VERSION.tar.gz"

RUN apk add --no-cache --upgrade \
    curl \
    tar

RUN mkdir /web-vault
WORKDIR /web-vault

SHELL ["/bin/ash", "-eo", "pipefail", "-c"]

RUN curl -L $URL | tar xz
RUN ls

########################## BUILD IMAGE ##########################
# Musl build image for statically compiled binary
FROM clux/muslrust:nightly-2019-11-23 as build

# set sqlite as default for DB ARG for backward comaptibility
ARG DB=sqlite

# Don't download rust docs
RUN rustup set profile minimal

ENV USER "root"

WORKDIR /app

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .

RUN rustup target add x86_64-unknown-linux-musl

# Make sure that we actually build the project
RUN touch src/main.rs

# Build
RUN cargo build --features ${DB} --release

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM alpine:3.10

ENV ROCKET_ENV "staging"
ENV ROCKET_PORT=80
ENV ROCKET_WORKERS=10
ENV SSL_CERT_DIR=/etc/ssl/certs

# Install needed libraries
RUN apk add --no-cache \
    openssl \
    curl \
    sqlite \
    ca-certificates

RUN mkdir /data
VOLUME /data
EXPOSE 80
EXPOSE 3012

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
COPY Rocket.toml .
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/x86_64-unknown-linux-musl/release/bitwarden_rs .

COPY docker/healthcheck.sh ./healthcheck.sh

HEALTHCHECK --interval=30s --timeout=3s CMD sh healthcheck.sh || exit 1

# Configures the startup!
WORKDIR /
CMD ["/bitwarden_rs"]
159 docker/arm64/Dockerfile Normal file
@@ -0,0 +1,159 @@
# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.

# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
# The web-vault digest specifies a particular web-vault build on Docker Hub.
# Using the digest instead of the tag name provides better security,
# as the digest of an image is immutable, whereas a tag name can later
# be changed to point to a malicious image.
#
# To verify the current digest for a given tag name:
# - From https://hub.docker.com/r/bitwardenrs/web-vault/tags,
# click the tag name to view the digest of the image it currently points to.
# - From the command line:
# $ docker pull bitwardenrs/web-vault:v2.19.0
# $ docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.19.0
# [bitwardenrs/web-vault@sha256:8747cfaa2c6d87d1749e119dd884697e8099389aa9aca30a4d73d4ff796fe0e4]
#
# - Conversely, to get the tag name from the digest:
# $ docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:8747cfaa2c6d87d1749e119dd884697e8099389aa9aca30a4d73d4ff796fe0e4
# [bitwardenrs/web-vault:v2.19.0]
#
FROM bitwardenrs/web-vault@sha256:8747cfaa2c6d87d1749e119dd884697e8099389aa9aca30a4d73d4ff796fe0e4 as vault

########################## BUILD IMAGE ##########################
FROM rust:1.50 as build

# Debian-based builds support multidb
ARG DB=sqlite,mysql,postgresql

# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive LANG=C.UTF-8 TZ=UTC TERM=xterm-256color

# Don't download rust docs
RUN rustup set profile minimal

# Install required build libs for arm64 architecture.
# To compile both mysql and postgresql we need some extra packages for both host arch and target arch
RUN sed 's/^deb/deb-src/' /etc/apt/sources.list > \
    /etc/apt/sources.list.d/deb-src.list \
    && dpkg --add-architecture arm64 \
    && apt-get update \
    && apt-get install -y \
        --no-install-recommends \
        libssl-dev:arm64 \
        libc6-dev:arm64 \
        libpq5:arm64 \
        libpq-dev \
        libmariadb-dev:arm64 \
        libmariadb-dev-compat:arm64

RUN apt-get update \
    && apt-get install -y \
        --no-install-recommends \
        gcc-aarch64-linux-gnu \
    && mkdir -p ~/.cargo \
    && echo '[target.aarch64-unknown-linux-gnu]' >> ~/.cargo/config \
    && echo 'linker = "aarch64-linux-gnu-gcc"' >> ~/.cargo/config \
    && echo 'rustflags = ["-L/usr/lib/aarch64-linux-gnu"]' >> ~/.cargo/config

ENV CARGO_HOME "/root/.cargo"
ENV USER "root"

# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
WORKDIR /app

# Copies over *only* your manifests and build files
COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs

# NOTE: This should be the last apt-get/dpkg for this stage, since after this it will fail because of broken dependencies.
# For Diesel-RS migrations_macros to compile with MySQL/MariaDB we need to do some magic.
# We at least need libmariadb3:amd64 installed for the x86_64 version of libmariadb.so (client)
# We also need the libmariadb-dev-compat:amd64 but it can not be installed together with the :arm64 version.
# What we can do is a force install, because nothing important is overlapping each other.
RUN apt-get install -y --no-install-recommends libmariadb3:amd64 && \
    apt-get download libmariadb-dev-compat:amd64 && \
    dpkg --force-all -i ./libmariadb-dev-compat*.deb && \
    rm -rvf ./libmariadb-dev-compat*.deb

# For Diesel-RS migrations_macros to compile with PostgreSQL we need to do some magic.
# The libpq5:arm64 package seems to not provide a symlink to libpq.so.5 with the name libpq.so.
# This is only provided by the libpq-dev package which can't be installed for both arch at the same time.
# Without this specific file the ld command will fail and compilation fails with it.
RUN ln -sfnr /usr/lib/aarch64-linux-gnu/libpq.so.5 /usr/lib/aarch64-linux-gnu/libpq.so

ENV CC_aarch64_unknown_linux_gnu="/usr/bin/aarch64-linux-gnu-gcc"
ENV CROSS_COMPILE="1"
ENV OPENSSL_INCLUDE_DIR="/usr/include/aarch64-linux-gnu"
ENV OPENSSL_LIB_DIR="/usr/lib/aarch64-linux-gnu"
RUN rustup target add aarch64-unknown-linux-gnu

# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN cargo build --features ${DB} --release --target=aarch64-unknown-linux-gnu
RUN find . -not -path "./target*" -delete

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .

# Make sure that we actually build the project
RUN touch src/main.rs

# Builds again, this time it'll just be
# your actual source files being built
RUN cargo build --features ${DB} --release --target=aarch64-unknown-linux-gnu

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM balenalib/aarch64-debian:buster

ENV ROCKET_ENV "staging"
ENV ROCKET_PORT=80
ENV ROCKET_WORKERS=10

RUN [ "cross-build-start" ]

# Install needed libraries
RUN apt-get update && apt-get install -y \
    --no-install-recommends \
    openssl \
    ca-certificates \
    curl \
    dumb-init \
    sqlite3 \
    libmariadb-dev-compat \
    libpq5 \
    && rm -rf /var/lib/apt/lists/*

RUN mkdir /data

RUN [ "cross-build-end" ]

VOLUME /data
EXPOSE 80
EXPOSE 3012

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
COPY Rocket.toml .
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/aarch64-unknown-linux-gnu/release/bitwarden_rs .

COPY docker/healthcheck.sh /healthcheck.sh
COPY docker/start.sh /start.sh

HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

# Configures the startup!
WORKDIR /
ENTRYPOINT ["/usr/bin/dumb-init", "--"]
CMD ["/start.sh"]
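A possible way to run the resulting image, following the VOLUME /data and EXPOSE 80 / EXPOSE 3012 declarations above (the image tag and host paths are illustrative assumptions only):

    $ docker run -d --name bitwarden_rs \
        -v /srv/bitwarden-data:/data \
        -p 80:80 -p 3012:3012 \
        example/bitwarden_rs:arm64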
159 docker/armv6/Dockerfile Normal file
@@ -0,0 +1,159 @@
# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.

# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
# The web-vault digest specifies a particular web-vault build on Docker Hub.
# Using the digest instead of the tag name provides better security,
# as the digest of an image is immutable, whereas a tag name can later
# be changed to point to a malicious image.
#
# To verify the current digest for a given tag name:
# - From https://hub.docker.com/r/bitwardenrs/web-vault/tags,
# click the tag name to view the digest of the image it currently points to.
# - From the command line:
# $ docker pull bitwardenrs/web-vault:v2.19.0
# $ docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.19.0
# [bitwardenrs/web-vault@sha256:8747cfaa2c6d87d1749e119dd884697e8099389aa9aca30a4d73d4ff796fe0e4]
#
# - Conversely, to get the tag name from the digest:
# $ docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:8747cfaa2c6d87d1749e119dd884697e8099389aa9aca30a4d73d4ff796fe0e4
# [bitwardenrs/web-vault:v2.19.0]
#
FROM bitwardenrs/web-vault@sha256:8747cfaa2c6d87d1749e119dd884697e8099389aa9aca30a4d73d4ff796fe0e4 as vault

########################## BUILD IMAGE ##########################
FROM rust:1.50 as build

# Debian-based builds support multidb
ARG DB=sqlite,mysql,postgresql

# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive LANG=C.UTF-8 TZ=UTC TERM=xterm-256color

# Don't download rust docs
RUN rustup set profile minimal

# Install required build libs for armel architecture.
# To compile both mysql and postgresql we need some extra packages for both host arch and target arch
RUN sed 's/^deb/deb-src/' /etc/apt/sources.list > \
    /etc/apt/sources.list.d/deb-src.list \
    && dpkg --add-architecture armel \
    && apt-get update \
    && apt-get install -y \
        --no-install-recommends \
        libssl-dev:armel \
        libc6-dev:armel \
        libpq5:armel \
        libpq-dev \
        libmariadb-dev:armel \
        libmariadb-dev-compat:armel

RUN apt-get update \
    && apt-get install -y \
        --no-install-recommends \
        gcc-arm-linux-gnueabi \
    && mkdir -p ~/.cargo \
    && echo '[target.arm-unknown-linux-gnueabi]' >> ~/.cargo/config \
    && echo 'linker = "arm-linux-gnueabi-gcc"' >> ~/.cargo/config \
    && echo 'rustflags = ["-L/usr/lib/arm-linux-gnueabi"]' >> ~/.cargo/config

ENV CARGO_HOME "/root/.cargo"
ENV USER "root"

# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
WORKDIR /app

# Copies over *only* your manifests and build files
COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs

# NOTE: This should be the last apt-get/dpkg for this stage, since after this it will fail because of broken dependencies.
# For Diesel-RS migrations_macros to compile with MySQL/MariaDB we need to do some magic.
# We at least need libmariadb3:amd64 installed for the x86_64 version of libmariadb.so (client)
# We also need the libmariadb-dev-compat:amd64 but it can not be installed together with the :armel version.
# What we can do is a force install, because nothing important is overlapping each other.
RUN apt-get install -y --no-install-recommends libmariadb3:amd64 && \
    apt-get download libmariadb-dev-compat:amd64 && \
    dpkg --force-all -i ./libmariadb-dev-compat*.deb && \
    rm -rvf ./libmariadb-dev-compat*.deb

# For Diesel-RS migrations_macros to compile with PostgreSQL we need to do some magic.
# The libpq5:armel package seems to not provide a symlink to libpq.so.5 with the name libpq.so.
# This is only provided by the libpq-dev package which can't be installed for both arch at the same time.
# Without this specific file the ld command will fail and compilation fails with it.
RUN ln -sfnr /usr/lib/arm-linux-gnueabi/libpq.so.5 /usr/lib/arm-linux-gnueabi/libpq.so

ENV CC_arm_unknown_linux_gnueabi="/usr/bin/arm-linux-gnueabi-gcc"
ENV CROSS_COMPILE="1"
ENV OPENSSL_INCLUDE_DIR="/usr/include/arm-linux-gnueabi"
ENV OPENSSL_LIB_DIR="/usr/lib/arm-linux-gnueabi"
RUN rustup target add arm-unknown-linux-gnueabi

# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN cargo build --features ${DB} --release --target=arm-unknown-linux-gnueabi
RUN find . -not -path "./target*" -delete

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .

# Make sure that we actually build the project
RUN touch src/main.rs

# Builds again, this time it'll just be
# your actual source files being built
RUN cargo build --features ${DB} --release --target=arm-unknown-linux-gnueabi

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM balenalib/rpi-debian:buster

ENV ROCKET_ENV "staging"
ENV ROCKET_PORT=80
ENV ROCKET_WORKERS=10

RUN [ "cross-build-start" ]

# Install needed libraries
RUN apt-get update && apt-get install -y \
|
||||||
|
--no-install-recommends \
|
||||||
|
openssl \
|
||||||
|
ca-certificates \
|
||||||
|
curl \
|
||||||
|
dumb-init \
|
||||||
|
sqlite3 \
|
||||||
|
libmariadb-dev-compat \
|
||||||
|
libpq5 \
|
||||||
|
&& rm -rf /var/lib/apt/lists/*
|
||||||
|
|
||||||
|
RUN mkdir /data
|
||||||
|
|
||||||
|
RUN [ "cross-build-end" ]
|
||||||
|
|
||||||
|
VOLUME /data
|
||||||
|
EXPOSE 80
|
||||||
|
EXPOSE 3012
|
||||||
|
|
||||||
|
# Copies the files from the context (Rocket.toml file and web-vault)
|
||||||
|
# and the binary from the "build" stage to the current stage
|
||||||
|
COPY Rocket.toml .
|
||||||
|
COPY --from=vault /web-vault ./web-vault
|
||||||
|
COPY --from=build /app/target/arm-unknown-linux-gnueabi/release/bitwarden_rs .
|
||||||
|
|
||||||
|
COPY docker/healthcheck.sh /healthcheck.sh
|
||||||
|
COPY docker/start.sh /start.sh
|
||||||
|
|
||||||
|
HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]
|
||||||
|
|
||||||
|
# Configures the startup!
|
||||||
|
WORKDIR /
|
||||||
|
ENTRYPOINT ["/usr/bin/dumb-init", "--"]
|
||||||
|
CMD ["/start.sh"]
|
@@ -1,110 +0,0 @@
# Using multistage build:
#   https://docs.docker.com/develop/develop-images/multistage-build/
#   https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
FROM alpine:3.10 as vault

ENV VAULT_VERSION "v2.12.0b"

ENV URL "https://github.com/dani-garcia/bw_web_builds/releases/download/$VAULT_VERSION/bw_web_$VAULT_VERSION.tar.gz"

RUN apk add --no-cache --upgrade \
    curl \
    tar

RUN mkdir /web-vault
WORKDIR /web-vault

SHELL ["/bin/ash", "-eo", "pipefail", "-c"]

RUN curl -L $URL | tar xz
RUN ls

########################## BUILD IMAGE ##########################
# We need to use the Rust build image, because
# we need the Rust compiler and Cargo tooling
FROM rust:1.39 as build

# set mysql backend
ARG DB=mysql

# Don't download rust docs
RUN rustup set profile minimal

RUN apt-get update \
    && apt-get install -y \
        --no-install-recommends \
        gcc-arm-linux-gnueabi \
    && mkdir -p ~/.cargo \
    && echo '[target.arm-unknown-linux-gnueabi]' >> ~/.cargo/config \
    && echo 'linker = "arm-linux-gnueabi-gcc"' >> ~/.cargo/config

ENV CARGO_HOME "/root/.cargo"
ENV USER "root"

WORKDIR /app

# Prepare openssl armel libs
RUN sed 's/^deb/deb-src/' /etc/apt/sources.list > \
        /etc/apt/sources.list.d/deb-src.list \
    && dpkg --add-architecture armel \
    && apt-get update \
    && apt-get install -y \
        --no-install-recommends \
        libssl-dev:armel \
        libc6-dev:armel \
        libmariadb-dev:armel

ENV CC_arm_unknown_linux_gnueabi="/usr/bin/arm-linux-gnueabi-gcc"
ENV CROSS_COMPILE="1"
ENV OPENSSL_INCLUDE_DIR="/usr/include/arm-linux-gnueabi"
ENV OPENSSL_LIB_DIR="/usr/lib/arm-linux-gnueabi"

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .

# Build
RUN rustup target add arm-unknown-linux-gnueabi
RUN cargo build --features ${DB} --release --target=arm-unknown-linux-gnueabi

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM balenalib/rpi-debian:buster

ENV ROCKET_ENV "staging"
ENV ROCKET_PORT=80
ENV ROCKET_WORKERS=10

RUN [ "cross-build-start" ]

# Install needed libraries
RUN apt-get update && apt-get install -y \
    --no-install-recommends \
    openssl \
    ca-certificates \
    curl \
    libmariadbclient-dev \
    && rm -rf /var/lib/apt/lists/*

RUN mkdir /data

RUN [ "cross-build-end" ]

VOLUME /data
EXPOSE 80

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
COPY Rocket.toml .
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/arm-unknown-linux-gnueabi/release/bitwarden_rs .

COPY docker/healthcheck.sh ./healthcheck.sh

HEALTHCHECK --interval=30s --timeout=3s CMD sh healthcheck.sh || exit 1

# Configures the startup!
WORKDIR /
CMD ["/bitwarden_rs"]
@@ -1,109 +0,0 @@
# Using multistage build:
#   https://docs.docker.com/develop/develop-images/multistage-build/
#   https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
FROM alpine:3.10 as vault

ENV VAULT_VERSION "v2.12.0b"

ENV URL "https://github.com/dani-garcia/bw_web_builds/releases/download/$VAULT_VERSION/bw_web_$VAULT_VERSION.tar.gz"

RUN apk add --no-cache --upgrade \
    curl \
    tar

RUN mkdir /web-vault
WORKDIR /web-vault

SHELL ["/bin/ash", "-eo", "pipefail", "-c"]

RUN curl -L $URL | tar xz
RUN ls

########################## BUILD IMAGE ##########################
# We need to use the Rust build image, because
# we need the Rust compiler and Cargo tooling
FROM rust:1.39 as build

# set sqlite as default for DB ARG for backward comaptibility
ARG DB=sqlite

# Don't download rust docs
RUN rustup set profile minimal

RUN apt-get update \
    && apt-get install -y \
        --no-install-recommends \
        gcc-arm-linux-gnueabi \
    && mkdir -p ~/.cargo \
    && echo '[target.arm-unknown-linux-gnueabi]' >> ~/.cargo/config \
    && echo 'linker = "arm-linux-gnueabi-gcc"' >> ~/.cargo/config

ENV CARGO_HOME "/root/.cargo"
ENV USER "root"

WORKDIR /app

# Prepare openssl armel libs
RUN sed 's/^deb/deb-src/' /etc/apt/sources.list > \
        /etc/apt/sources.list.d/deb-src.list \
    && dpkg --add-architecture armel \
    && apt-get update \
    && apt-get install -y \
        --no-install-recommends \
        libssl-dev:armel \
        libc6-dev:armel

ENV CC_arm_unknown_linux_gnueabi="/usr/bin/arm-linux-gnueabi-gcc"
ENV CROSS_COMPILE="1"
ENV OPENSSL_INCLUDE_DIR="/usr/include/arm-linux-gnueabi"
ENV OPENSSL_LIB_DIR="/usr/lib/arm-linux-gnueabi"

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .

# Build
RUN rustup target add arm-unknown-linux-gnueabi
RUN cargo build --features ${DB} --release --target=arm-unknown-linux-gnueabi

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM balenalib/rpi-debian:buster

ENV ROCKET_ENV "staging"
ENV ROCKET_PORT=80
ENV ROCKET_WORKERS=10

RUN [ "cross-build-start" ]

# Install needed libraries
RUN apt-get update && apt-get install -y \
    --no-install-recommends \
    openssl \
    ca-certificates \
    curl \
    sqlite3 \
    && rm -rf /var/lib/apt/lists/*

RUN mkdir /data

RUN [ "cross-build-end" ]

VOLUME /data
EXPOSE 80

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
COPY Rocket.toml .
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/arm-unknown-linux-gnueabi/release/bitwarden_rs .

COPY docker/healthcheck.sh ./healthcheck.sh

HEALTHCHECK --interval=30s --timeout=3s CMD sh healthcheck.sh || exit 1

# Configures the startup!
WORKDIR /
CMD ["/bitwarden_rs"]
159 docker/armv7/Dockerfile Normal file
@@ -0,0 +1,159 @@
# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.

# Using multistage build:
#   https://docs.docker.com/develop/develop-images/multistage-build/
#   https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
# The web-vault digest specifies a particular web-vault build on Docker Hub.
# Using the digest instead of the tag name provides better security,
# as the digest of an image is immutable, whereas a tag name can later
# be changed to point to a malicious image.
#
# To verify the current digest for a given tag name:
# - From https://hub.docker.com/r/bitwardenrs/web-vault/tags,
#   click the tag name to view the digest of the image it currently points to.
# - From the command line:
#     $ docker pull bitwardenrs/web-vault:v2.19.0
#     $ docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.19.0
#     [bitwardenrs/web-vault@sha256:8747cfaa2c6d87d1749e119dd884697e8099389aa9aca30a4d73d4ff796fe0e4]
#
# - Conversely, to get the tag name from the digest:
#     $ docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:8747cfaa2c6d87d1749e119dd884697e8099389aa9aca30a4d73d4ff796fe0e4
#     [bitwardenrs/web-vault:v2.19.0]
#
FROM bitwardenrs/web-vault@sha256:8747cfaa2c6d87d1749e119dd884697e8099389aa9aca30a4d73d4ff796fe0e4 as vault

########################## BUILD IMAGE ##########################
FROM rust:1.50 as build

# Debian-based builds support multidb
ARG DB=sqlite,mysql,postgresql

# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive LANG=C.UTF-8 TZ=UTC TERM=xterm-256color

# Don't download rust docs
RUN rustup set profile minimal

# Install required build libs for armhf architecture.
# To compile both mysql and postgresql we need some extra packages for both host arch and target arch
RUN sed 's/^deb/deb-src/' /etc/apt/sources.list > \
        /etc/apt/sources.list.d/deb-src.list \
    && dpkg --add-architecture armhf \
    && apt-get update \
    && apt-get install -y \
        --no-install-recommends \
        libssl-dev:armhf \
        libc6-dev:armhf \
        libpq5:armhf \
        libpq-dev \
        libmariadb-dev:armhf \
        libmariadb-dev-compat:armhf

RUN apt-get update \
    && apt-get install -y \
        --no-install-recommends \
        gcc-arm-linux-gnueabihf \
    && mkdir -p ~/.cargo \
    && echo '[target.armv7-unknown-linux-gnueabihf]' >> ~/.cargo/config \
    && echo 'linker = "arm-linux-gnueabihf-gcc"' >> ~/.cargo/config \
    && echo 'rustflags = ["-L/usr/lib/arm-linux-gnueabihf"]' >> ~/.cargo/config

ENV CARGO_HOME "/root/.cargo"
ENV USER "root"

# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
WORKDIR /app

# Copies over *only* your manifests and build files
COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs

# NOTE: This should be the last apt-get/dpkg for this stage, since after this it will fail because of broken dependencies.
# For Diesel-RS migrations_macros to compile with MySQL/MariaDB we need to do some magic.
# We at least need libmariadb3:amd64 installed for the x86_64 version of libmariadb.so (client)
# We also need the libmariadb-dev-compat:amd64 but it can not be installed together with the :armhf version.
# What we can do is a force install, because nothing important is overlapping each other.
RUN apt-get install -y --no-install-recommends libmariadb3:amd64 && \
    apt-get download libmariadb-dev-compat:amd64 && \
    dpkg --force-all -i ./libmariadb-dev-compat*.deb && \
    rm -rvf ./libmariadb-dev-compat*.deb

# For Diesel-RS migrations_macros to compile with PostgreSQL we need to do some magic.
# The libpq5:armhf package seems to not provide a symlink to libpq.so.5 with the name libpq.so.
# This is only provided by the libpq-dev package which can't be installed for both arch at the same time.
# Without this specific file the ld command will fail and compilation fails with it.
RUN ln -sfnr /usr/lib/arm-linux-gnueabihf/libpq.so.5 /usr/lib/arm-linux-gnueabihf/libpq.so

ENV CC_armv7_unknown_linux_gnueabihf="/usr/bin/arm-linux-gnueabihf-gcc"
ENV CROSS_COMPILE="1"
ENV OPENSSL_INCLUDE_DIR="/usr/include/arm-linux-gnueabihf"
ENV OPENSSL_LIB_DIR="/usr/lib/arm-linux-gnueabihf"
RUN rustup target add armv7-unknown-linux-gnueabihf

# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN cargo build --features ${DB} --release --target=armv7-unknown-linux-gnueabihf
RUN find . -not -path "./target*" -delete

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .

# Make sure that we actually build the project
RUN touch src/main.rs

# Builds again, this time it'll just be
# your actual source files being built
RUN cargo build --features ${DB} --release --target=armv7-unknown-linux-gnueabihf

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM balenalib/armv7hf-debian:buster

ENV ROCKET_ENV "staging"
ENV ROCKET_PORT=80
ENV ROCKET_WORKERS=10

RUN [ "cross-build-start" ]

# Install needed libraries
RUN apt-get update && apt-get install -y \
    --no-install-recommends \
    openssl \
    ca-certificates \
    curl \
    dumb-init \
    sqlite3 \
    libmariadb-dev-compat \
    libpq5 \
    && rm -rf /var/lib/apt/lists/*

RUN mkdir /data

RUN [ "cross-build-end" ]

VOLUME /data
EXPOSE 80
EXPOSE 3012

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
COPY Rocket.toml .
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/armv7-unknown-linux-gnueabihf/release/bitwarden_rs .

COPY docker/healthcheck.sh /healthcheck.sh
COPY docker/start.sh /start.sh

HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

# Configures the startup!
WORKDIR /
ENTRYPOINT ["/usr/bin/dumb-init", "--"]
CMD ["/start.sh"]
114 docker/armv7/Dockerfile.alpine Normal file
@@ -0,0 +1,114 @@
# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.

# Using multistage build:
#   https://docs.docker.com/develop/develop-images/multistage-build/
#   https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
# The web-vault digest specifies a particular web-vault build on Docker Hub.
# Using the digest instead of the tag name provides better security,
# as the digest of an image is immutable, whereas a tag name can later
# be changed to point to a malicious image.
#
# To verify the current digest for a given tag name:
# - From https://hub.docker.com/r/bitwardenrs/web-vault/tags,
#   click the tag name to view the digest of the image it currently points to.
# - From the command line:
#     $ docker pull bitwardenrs/web-vault:v2.19.0
#     $ docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.19.0
#     [bitwardenrs/web-vault@sha256:8747cfaa2c6d87d1749e119dd884697e8099389aa9aca30a4d73d4ff796fe0e4]
#
# - Conversely, to get the tag name from the digest:
#     $ docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:8747cfaa2c6d87d1749e119dd884697e8099389aa9aca30a4d73d4ff796fe0e4
#     [bitwardenrs/web-vault:v2.19.0]
#
FROM bitwardenrs/web-vault@sha256:8747cfaa2c6d87d1749e119dd884697e8099389aa9aca30a4d73d4ff796fe0e4 as vault

########################## BUILD IMAGE ##########################
FROM messense/rust-musl-cross:armv7-musleabihf as build

# Alpine-based ARM (musl) only supports sqlite during compile time.
ARG DB=sqlite

# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive LANG=C.UTF-8 TZ=UTC TERM=xterm-256color

# Don't download rust docs
RUN rustup set profile minimal

ENV USER "root"
ENV RUSTFLAGS='-C link-arg=-s'
ENV CFLAGS_armv7_unknown_linux_musleabihf="-mfpu=vfpv3-d16"

# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
WORKDIR /app

# Copies over *only* your manifests and build files
COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs

RUN rustup target add armv7-unknown-linux-musleabihf

# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN cargo build --features ${DB} --release --target=armv7-unknown-linux-musleabihf
RUN find . -not -path "./target*" -delete

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .

# Make sure that we actually build the project
RUN touch src/main.rs

# Builds again, this time it'll just be
# your actual source files being built
RUN cargo build --features ${DB} --release --target=armv7-unknown-linux-musleabihf
RUN musl-strip target/armv7-unknown-linux-musleabihf/release/bitwarden_rs

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM balenalib/armv7hf-alpine:3.13

ENV ROCKET_ENV "staging"
ENV ROCKET_PORT=80
ENV ROCKET_WORKERS=10
ENV SSL_CERT_DIR=/etc/ssl/certs

RUN [ "cross-build-start" ]

# Install needed libraries
RUN apk add --no-cache \
    openssl \
    curl \
    dumb-init \
    sqlite \
    ca-certificates

RUN mkdir /data

RUN [ "cross-build-end" ]

VOLUME /data
EXPOSE 80
EXPOSE 3012

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
COPY Rocket.toml .
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/armv7-unknown-linux-musleabihf/release/bitwarden_rs .

COPY docker/healthcheck.sh /healthcheck.sh
COPY docker/start.sh /start.sh

HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

# Configures the startup!
WORKDIR /
ENTRYPOINT ["/usr/bin/dumb-init", "--"]
CMD ["/start.sh"]
@@ -1,111 +0,0 @@
# Using multistage build:
#   https://docs.docker.com/develop/develop-images/multistage-build/
#   https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
FROM alpine:3.10 as vault

ENV VAULT_VERSION "v2.12.0b"

ENV URL "https://github.com/dani-garcia/bw_web_builds/releases/download/$VAULT_VERSION/bw_web_$VAULT_VERSION.tar.gz"

RUN apk add --no-cache --upgrade \
    curl \
    tar

RUN mkdir /web-vault
WORKDIR /web-vault

SHELL ["/bin/ash", "-eo", "pipefail", "-c"]

RUN curl -L $URL | tar xz
RUN ls

########################## BUILD IMAGE ##########################
# We need to use the Rust build image, because
# we need the Rust compiler and Cargo tooling
FROM rust:1.39 as build

# set mysql backend
ARG DB=mysql

# Don't download rust docs
RUN rustup set profile minimal

RUN apt-get update \
    && apt-get install -y \
        --no-install-recommends \
        gcc-arm-linux-gnueabihf \
    && mkdir -p ~/.cargo \
    && echo '[target.armv7-unknown-linux-gnueabihf]' >> ~/.cargo/config \
    && echo 'linker = "arm-linux-gnueabihf-gcc"' >> ~/.cargo/config

ENV CARGO_HOME "/root/.cargo"
ENV USER "root"

WORKDIR /app

# Prepare openssl armhf libs
RUN sed 's/^deb/deb-src/' /etc/apt/sources.list > \
        /etc/apt/sources.list.d/deb-src.list \
    && dpkg --add-architecture armhf \
    && apt-get update \
    && apt-get install -y \
        --no-install-recommends \
        libssl-dev:armhf \
        libc6-dev:armhf \
        libmariadb-dev:armhf


ENV CC_armv7_unknown_linux_gnueabihf="/usr/bin/arm-linux-gnueabihf-gcc"
ENV CROSS_COMPILE="1"
ENV OPENSSL_INCLUDE_DIR="/usr/include/arm-linux-gnueabihf"
ENV OPENSSL_LIB_DIR="/usr/lib/arm-linux-gnueabihf"

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .

# Build
RUN rustup target add armv7-unknown-linux-gnueabihf
RUN cargo build --features ${DB} --release --target=armv7-unknown-linux-gnueabihf

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM balenalib/armv7hf-debian:buster

ENV ROCKET_ENV "staging"
ENV ROCKET_PORT=80
ENV ROCKET_WORKERS=10

RUN [ "cross-build-start" ]

# Install needed libraries
RUN apt-get update && apt-get install -y \
    --no-install-recommends \
    openssl \
    ca-certificates \
    curl \
    libmariadbclient-dev \
    && rm -rf /var/lib/apt/lists/*

RUN mkdir /data

RUN [ "cross-build-end" ]

VOLUME /data
EXPOSE 80

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
COPY Rocket.toml .
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/armv7-unknown-linux-gnueabihf/release/bitwarden_rs .

COPY docker/healthcheck.sh ./healthcheck.sh

HEALTHCHECK --interval=30s --timeout=3s CMD sh healthcheck.sh || exit 1

# Configures the startup!
WORKDIR /
CMD ["/bitwarden_rs"]
@@ -1,109 +0,0 @@
# Using multistage build:
#   https://docs.docker.com/develop/develop-images/multistage-build/
#   https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
FROM alpine:3.10 as vault

ENV VAULT_VERSION "v2.12.0b"

ENV URL "https://github.com/dani-garcia/bw_web_builds/releases/download/$VAULT_VERSION/bw_web_$VAULT_VERSION.tar.gz"

RUN apk add --no-cache --upgrade \
    curl \
    tar

RUN mkdir /web-vault
WORKDIR /web-vault

SHELL ["/bin/ash", "-eo", "pipefail", "-c"]

RUN curl -L $URL | tar xz
RUN ls

########################## BUILD IMAGE ##########################
# We need to use the Rust build image, because
# we need the Rust compiler and Cargo tooling
FROM rust:1.39 as build

# set sqlite as default for DB ARG for backward comaptibility
ARG DB=sqlite

# Don't download rust docs
RUN rustup set profile minimal

RUN apt-get update \
    && apt-get install -y \
        --no-install-recommends \
        gcc-arm-linux-gnueabihf \
    && mkdir -p ~/.cargo \
    && echo '[target.armv7-unknown-linux-gnueabihf]' >> ~/.cargo/config \
    && echo 'linker = "arm-linux-gnueabihf-gcc"' >> ~/.cargo/config

ENV CARGO_HOME "/root/.cargo"
ENV USER "root"

WORKDIR /app

# Prepare openssl armhf libs
RUN sed 's/^deb/deb-src/' /etc/apt/sources.list > \
        /etc/apt/sources.list.d/deb-src.list \
    && dpkg --add-architecture armhf \
    && apt-get update \
    && apt-get install -y \
        --no-install-recommends \
        libssl-dev:armhf \
        libc6-dev:armhf

ENV CC_armv7_unknown_linux_gnueabihf="/usr/bin/arm-linux-gnueabihf-gcc"
ENV CROSS_COMPILE="1"
ENV OPENSSL_INCLUDE_DIR="/usr/include/arm-linux-gnueabihf"
ENV OPENSSL_LIB_DIR="/usr/lib/arm-linux-gnueabihf"

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .

# Build
RUN rustup target add armv7-unknown-linux-gnueabihf
RUN cargo build --features ${DB} --release --target=armv7-unknown-linux-gnueabihf

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM balenalib/armv7hf-debian:buster

ENV ROCKET_ENV "staging"
ENV ROCKET_PORT=80
ENV ROCKET_WORKERS=10

RUN [ "cross-build-start" ]

# Install needed libraries
RUN apt-get update && apt-get install -y \
    --no-install-recommends \
    openssl \
    ca-certificates \
    curl \
    sqlite3 \
    && rm -rf /var/lib/apt/lists/*

RUN mkdir /data

RUN [ "cross-build-end" ]

VOLUME /data
EXPOSE 80

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
COPY Rocket.toml .
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/armv7-unknown-linux-gnueabihf/release/bitwarden_rs .

COPY docker/healthcheck.sh ./healthcheck.sh

HEALTHCHECK --interval=30s --timeout=3s CMD sh healthcheck.sh || exit 1

# Configures the startup!
WORKDIR /
CMD ["/bitwarden_rs"]
57 docker/healthcheck.sh Normal file → Executable file
@@ -1,8 +1,53 @@
-#!/usr/bin/env sh
-
-if [ -z "$ROCKET_TLS"]
-then
-curl --fail http://localhost:${ROCKET_PORT:-"80"}/alive || exit 1
-else
-curl --insecure --fail https://localhost:${ROCKET_PORT:-"80"}/alive || exit 1
-fi
+#!/bin/sh
+
+# Use the value of the corresponding env var (if present),
+# or a default value otherwise.
+: ${DATA_FOLDER:="data"}
+: ${ROCKET_PORT:="80"}
+
+CONFIG_FILE="${DATA_FOLDER}"/config.json
+
+# Given a config key, return the corresponding config value from the
+# config file. If the key doesn't exist, return an empty string.
+get_config_val() {
+    local key="$1"
+    # Extract a line of the form:
+    #     "domain": "https://bw.example.com/path",
+    grep "\"${key}\":" "${CONFIG_FILE}" |
+    # To extract just the value (https://bw.example.com/path), delete:
+    # (1) everything up to and including the first ':',
+    # (2) whitespace and '"' from the front,
+    # (3) ',' and '"' from the back.
+    sed -e 's/[^:]\+://' -e 's/^[ "]\+//' -e 's/[,"]\+$//'
+}
+
+# Extract the base path from a domain URL. For example:
+# - `` -> ``
+# - `https://bw.example.com` -> ``
+# - `https://bw.example.com/` -> ``
+# - `https://bw.example.com/path` -> `/path`
+# - `https://bw.example.com/multi/path` -> `/multi/path`
+get_base_path() {
+    echo "$1" |
+    # Delete:
+    # (1) everything up to and including '://',
+    # (2) everything up to '/',
+    # (3) trailing '/' from the back.
+    sed -e 's|.*://||' -e 's|[^/]\+||' -e 's|/*$||'
+}
+
+# Read domain URL from config.json, if present.
+if [ -r "${CONFIG_FILE}" ]; then
+    domain="$(get_config_val 'domain')"
+    if [ -n "${domain}" ]; then
+        # config.json 'domain' overrides the DOMAIN env var.
+        DOMAIN="${domain}"
+    fi
+fi
+
+base_path="$(get_base_path "${DOMAIN}")"
+if [ -n "${ROCKET_TLS}" ]; then
+    s='s'
+fi
+curl --insecure --fail --silent --show-error \
+     "http${s}://localhost:${ROCKET_PORT}${base_path}/alive" || exit 1
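To make the control flow of the new healthcheck concrete, here is the probe it would end up issuing for one hypothetical setup; the domain value is an example only, and it is solely a non-empty `ROCKET_TLS` that switches the scheme to https:

# Example only: with ROCKET_TLS set and config.json containing
#   "domain": "https://bw.example.com/vault",
# the script resolves base_path=/vault, keeps the default ROCKET_PORT=80,
# and effectively runs:
curl --insecure --fail --silent --show-error \
     "https://localhost:80/vault/alive" || exit 1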
17 docker/render_template Executable file
@@ -0,0 +1,17 @@
#!/usr/bin/env python3

import os, argparse, json

import jinja2

args_parser = argparse.ArgumentParser()
args_parser.add_argument('template_file', help='Jinja2 template file to render.')
args_parser.add_argument('render_vars', help='JSON-encoded data to pass to the templating engine.')
cli_args = args_parser.parse_args()

render_vars = json.loads(cli_args.render_vars)
environment = jinja2.Environment(
    loader=jinja2.FileSystemLoader(os.getcwd()),
    trim_blocks=True,
)
print(environment.get_template(cli_args.template_file).render(render_vars))
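For reference, the script above simply renders a Jinja2 template found relative to the current directory, using a JSON object supplied on the command line. A minimal sketch of a manual invocation from the repository root; the JSON keys shown are placeholders, since the real variable set is whatever `Dockerfile.j2` expects and is normally supplied by the `make` step mentioned in the generated Dockerfiles:

# Hypothetical manual run; "target" and "arch" are illustrative variable names only.
./docker/render_template docker/Dockerfile.j2 '{"target": "debian", "arch": "armv7"}' > /tmp/Dockerfile.preview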
15 docker/start.sh Executable file
@@ -0,0 +1,15 @@
#!/bin/sh

if [ -r /etc/bitwarden_rs.sh ]; then
    . /etc/bitwarden_rs.sh
fi

if [ -d /etc/bitwarden_rs.d ]; then
    for f in /etc/bitwarden_rs.d/*.sh; do
        if [ -r $f ]; then
            . $f
        fi
    done
fi

exec /bitwarden_rs "${@}"
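Because start.sh sources /etc/bitwarden_rs.sh and any readable *.sh file under /etc/bitwarden_rs.d/ before exec'ing the server, extra configuration can be injected by mounting a small shell fragment into the container. A minimal sketch of such a drop-in; the file name and the settings shown are examples, not something this change defines:

# Hypothetical /etc/bitwarden_rs.d/10-local-settings.sh, sourced by start.sh at container start.
export ROCKET_PORT=8080          # port the server should listen on
export SIGNUPS_ALLOWED=false     # example bitwarden_rs setting; adjust as needed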
20 hooks/README.md Normal file
@@ -0,0 +1,20 @@
The hooks in this directory are used to create multi-arch images using Docker Hub automated builds.

Docker Hub hooks provide these predefined [environment variables](https://docs.docker.com/docker-hub/builds/advanced/#environment-variables-for-building-and-testing):

* `SOURCE_BRANCH`: the name of the branch or the tag that is currently being tested.
* `SOURCE_COMMIT`: the SHA1 hash of the commit being tested.
* `COMMIT_MSG`: the message from the commit being tested and built.
* `DOCKER_REPO`: the name of the Docker repository being built.
* `DOCKERFILE_PATH`: the dockerfile currently being built.
* `DOCKER_TAG`: the Docker repository tag being built.
* `IMAGE_NAME`: the name and tag of the Docker repository being built. (This variable is a combination of `DOCKER_REPO:DOCKER_TAG`.)

The current multi-arch image build relies on the original bitwarden_rs Dockerfiles, which use cross-compilation for architectures other than `amd64`, and don't yet support all arch/distro combinations. However, cross-compilation is much faster than QEMU-based builds (e.g., using `docker buildx`). This situation may need to be revisited at some point.

## References

* https://docs.docker.com/docker-hub/builds/advanced/
* https://docs.docker.com/engine/reference/commandline/manifest/
* https://www.docker.com/blog/multi-arch-build-and-images-the-simple-way/
* https://success.docker.com/article/how-do-i-authenticate-with-the-v2-api
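Since Docker Hub injects the variables listed above automatically, exercising these hooks outside of an automated build means exporting them by hand first. A minimal local-test sketch, assuming a full checkout and treating the repository name and tag below as placeholders rather than values this change prescribes:

# Hypothetical local dry run of the build hook.
export DOCKER_REPO=index.docker.io/example/bitwarden_rs    # placeholder repository
export DOCKER_TAG=testing                                  # non-alpine tag -> Debian-based Dockerfiles
export SOURCE_COMMIT="$(git rev-parse HEAD)"               # hooks/build falls back to this when unset
./hooks/build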
16 hooks/arches.sh Normal file
@@ -0,0 +1,16 @@
# The default Debian-based images support these arches for all database backends.
arches=(
    amd64
    armv6
    armv7
    arm64
)

if [[ "${DOCKER_TAG}" == *alpine ]]; then
    # The Alpine image build currently only works for certain arches.
    distro_suffix=.alpine
    arches=(
        amd64
        armv7
    )
fi
45 hooks/build Executable file
@@ -0,0 +1,45 @@
#!/bin/bash

echo ">>> Building images..."

source ./hooks/arches.sh

if [[ -z "${SOURCE_COMMIT}" ]]; then
    # This var is typically predefined by Docker Hub, but it won't be
    # when testing locally.
    SOURCE_COMMIT="$(git rev-parse HEAD)"
fi

# Construct a version string in the style of `build.rs`.
GIT_EXACT_TAG="$(git describe --tags --abbrev=0 --exact-match 2>/dev/null)"
if [[ -n "${GIT_EXACT_TAG}" ]]; then
    SOURCE_VERSION="${GIT_EXACT_TAG}"
else
    GIT_LAST_TAG="$(git describe --tags --abbrev=0)"
    SOURCE_VERSION="${GIT_LAST_TAG}-${SOURCE_COMMIT:0:8}"
fi

LABELS=(
    # https://github.com/opencontainers/image-spec/blob/master/annotations.md
    org.opencontainers.image.created="$(date --utc --iso-8601=seconds)"
    org.opencontainers.image.documentation="https://github.com/dani-garcia/bitwarden_rs/wiki"
    org.opencontainers.image.licenses="GPL-3.0-only"
    org.opencontainers.image.revision="${SOURCE_COMMIT}"
    org.opencontainers.image.source="${SOURCE_REPOSITORY_URL}"
    org.opencontainers.image.url="https://hub.docker.com/r/${DOCKER_REPO#*/}"
    org.opencontainers.image.version="${SOURCE_VERSION}"
)
LABEL_ARGS=()
for label in "${LABELS[@]}"; do
    LABEL_ARGS+=(--label "${label}")
done

set -ex

for arch in "${arches[@]}"; do
    docker build \
        "${LABEL_ARGS[@]}" \
        -t "${DOCKER_REPO}:${DOCKER_TAG}-${arch}" \
        -f docker/${arch}/Dockerfile${distro_suffix} \
        .
done
28 hooks/pre_build Executable file
@@ -0,0 +1,28 @@
#!/bin/bash

set -ex

# If requested, print some environment info for troubleshooting.
if [[ -n "${DOCKER_HUB_DEBUG}" ]]; then
    id
    pwd
    df -h
    env
    docker info
    docker version
fi

# Install build dependencies.
deps=(
    jq
)
apt-get update
apt-get install -y "${deps[@]}"

# Docker Hub uses a shallow clone and doesn't fetch tags, which breaks some
# Git operations that we perform later, so fetch the complete history and
# tags first. Note that if the build is cached, the clone may have been
# unshallowed already; if so, unshallowing will fail, so skip it.
if [[ -f .git/shallow ]]; then
    git fetch --unshallow --tags
fi
138 hooks/push Executable file
@@ -0,0 +1,138 @@
#!/bin/bash

source ./hooks/arches.sh

export DOCKER_CLI_EXPERIMENTAL=enabled

# Join a list of args with a single char.
# Ref: https://stackoverflow.com/a/17841619
join() { local IFS="$1"; shift; echo "$*"; }

set -ex

echo ">>> Starting local Docker registry..."

# Docker Buildx's `docker-container` driver is needed for multi-platform
# builds, but it can't access existing images on the Docker host (like the
# cross-compiled ones we just built). Those images first need to be pushed to
# a registry -- Docker Hub could be used, but since it's not trivial to clean
# up those intermediate images on Docker Hub, it's easier to just run a local
# Docker registry, which gets cleaned up automatically once the build job ends.
#
# https://docs.docker.com/registry/deploying/
# https://hub.docker.com/_/registry
#
# Use host networking so the buildx container can access the registry via
# localhost.
#
docker run -d --name registry --network host registry:2 # defaults to port 5000

# Docker Hub sets a `DOCKER_REPO` env var with the format `index.docker.io/user/repo`.
# Strip the registry portion to construct a local repo path for use in `Dockerfile.buildx`.
LOCAL_REGISTRY="localhost:5000"
REPO="${DOCKER_REPO#*/}"
LOCAL_REPO="${LOCAL_REGISTRY}/${REPO}"

echo ">>> Pushing images to local registry..."

for arch in ${arches[@]}; do
    docker_image="${DOCKER_REPO}:${DOCKER_TAG}-${arch}"
    local_image="${LOCAL_REPO}:${DOCKER_TAG}-${arch}"
    docker tag "${docker_image}" "${local_image}"
    docker push "${local_image}"
done

echo ">>> Setting up Docker Buildx..."

# Same as earlier, use host networking so the buildx container can access the
# registry via localhost.
#
# Ref: https://github.com/docker/buildx/issues/94#issuecomment-534367714
#
docker buildx create --name builder --use --driver-opt network=host

echo ">>> Running Docker Buildx..."

tags=("${DOCKER_REPO}:${DOCKER_TAG}")

# If the Docker tag starts with a version number, assume the latest release
# is being pushed. Add an extra tag (`latest` or `alpine`, as appropriate)
# to make it easier for users to track the latest release.
if [[ "${DOCKER_TAG}" =~ ^[0-9]+\.[0-9]+\.[0-9]+ ]]; then
    if [[ "${DOCKER_TAG}" == *alpine ]]; then
        tags+=(${DOCKER_REPO}:alpine)
    else
        tags+=(${DOCKER_REPO}:latest)
    fi
fi

tag_args=()
for tag in "${tags[@]}"; do
    tag_args+=(--tag "${tag}")
done

# Docker Buildx takes a list of target platforms (OS/arch/variant), so map
# the arch list to a platform list (assuming the OS is always `linux`).
declare -A arch_to_platform=(
    [amd64]="linux/amd64"
    [armv6]="linux/arm/v6"
    [armv7]="linux/arm/v7"
    [arm64]="linux/arm64"
)
platforms=()
for arch in ${arches[@]}; do
    platforms+=("${arch_to_platform[$arch]}")
done
platforms="$(join "," "${platforms[@]}")"

# Run the build, pushing the resulting images and multi-arch manifest list to
# Docker Hub. The Dockerfile is read from stdin to avoid sending any build
# context, which isn't needed here since the actual cross-compiled images
# have already been built.
docker buildx build \
    --network host \
    --build-arg LOCAL_REPO="${LOCAL_REPO}" \
    --build-arg DOCKER_TAG="${DOCKER_TAG}" \
    --platform "${platforms}" \
    "${tag_args[@]}" \
    --push \
    - < ./docker/Dockerfile.buildx

# Add an extra arch-specific tag for `arm32v6`; Docker can't seem to properly
# auto-select that image on ARMv6 platforms like Raspberry Pi 1 and Zero
# (https://github.com/moby/moby/issues/41017).
#
# Note that we use `arm32v6` instead of `armv6` to be consistent with the
# existing bitwarden_rs tags, which adhere to the naming conventions of the
# Docker per-architecture repos (e.g., https://hub.docker.com/u/arm32v6).
# Unfortunately, these per-arch repo names aren't always consistent with the
# corresponding platform (OS/arch/variant) IDs, particularly in the case of
# 32-bit ARM arches (e.g., `linux/arm/v6` is used, not `linux/arm32/v6`).
#
# TODO: It looks like this issue should be fixed starting in Docker 20.10.0,
# so this step can be removed once fixed versions are in wider distribution.
#
# Tags:
#
#   testing        => testing-arm32v6
#   testing-alpine => <ignored>
#   x.y.z          => x.y.z-arm32v6, latest-arm32v6
#   x.y.z-alpine   => <ignored>
#
if [[ "${DOCKER_TAG}" != *alpine ]]; then
    image="${DOCKER_REPO}":"${DOCKER_TAG}"

    # Fetch the multi-arch manifest list and find the digest of the armv6 image.
    filter='.manifests|.[]|select(.platform.architecture=="arm" and .platform.variant=="v6")|.digest'
    digest="$(docker manifest inspect "${image}" | jq -r "${filter}")"

    # Pull the armv6 image by digest, retag it, and repush it.
    docker pull "${DOCKER_REPO}"@"${digest}"
    docker tag "${DOCKER_REPO}"@"${digest}" "${image}"-arm32v6
    docker push "${image}"-arm32v6

    if [[ "${DOCKER_TAG}" =~ ^[0-9]+\.[0-9]+\.[0-9]+ ]]; then
        docker tag "${image}"-arm32v6 "${DOCKER_REPO}:latest"-arm32v6
        docker push "${DOCKER_REPO}:latest"-arm32v6
    fi
fi
@@ -0,0 +1 @@
DROP TABLE org_policies;
@@ -0,0 +1,9 @@
CREATE TABLE org_policies (
    uuid     CHAR(36) NOT NULL PRIMARY KEY,
    org_uuid CHAR(36) NOT NULL REFERENCES organizations (uuid),
    atype    INTEGER  NOT NULL,
    enabled  BOOLEAN  NOT NULL,
    data     TEXT     NOT NULL,

    UNIQUE (org_uuid, atype)
);
@@ -0,0 +1 @@

@@ -0,0 +1,3 @@
ALTER TABLE ciphers
ADD COLUMN
    deleted_at DATETIME;
@@ -0,0 +1,2 @@
ALTER TABLE users_collections
ADD COLUMN hide_passwords BOOLEAN NOT NULL DEFAULT FALSE;
@@ -0,0 +1,13 @@
ALTER TABLE ciphers
ADD COLUMN favorite BOOLEAN NOT NULL DEFAULT FALSE;

-- Transfer favorite status for user-owned ciphers.
UPDATE ciphers
SET favorite = TRUE
WHERE EXISTS (
    SELECT * FROM favorites
    WHERE favorites.user_uuid = ciphers.user_uuid
      AND favorites.cipher_uuid = ciphers.uuid
);

DROP TABLE favorites;
@@ -0,0 +1,16 @@
CREATE TABLE favorites (
    user_uuid   CHAR(36) NOT NULL REFERENCES users(uuid),
    cipher_uuid CHAR(36) NOT NULL REFERENCES ciphers(uuid),

    PRIMARY KEY (user_uuid, cipher_uuid)
);

-- Transfer favorite status for user-owned ciphers.
INSERT INTO favorites(user_uuid, cipher_uuid)
SELECT user_uuid, uuid
FROM ciphers
WHERE favorite = TRUE
  AND user_uuid IS NOT NULL;

ALTER TABLE ciphers
DROP COLUMN favorite;
@@ -0,0 +1 @@
ALTER TABLE users ADD COLUMN enabled BOOLEAN NOT NULL DEFAULT 1;
@@ -0,0 +1 @@
ALTER TABLE users ADD COLUMN stamp_exception TEXT DEFAULT NULL;
1 migrations/mysql/2021-03-11-190243_add_sends/down.sql Normal file
@@ -0,0 +1 @@
DROP TABLE sends;
25 migrations/mysql/2021-03-11-190243_add_sends/up.sql Normal file
@@ -0,0 +1,25 @@
CREATE TABLE sends (
    uuid              CHAR(36) NOT NULL PRIMARY KEY,
    user_uuid         CHAR(36) REFERENCES users (uuid),
    organization_uuid CHAR(36) REFERENCES organizations (uuid),

    name              TEXT     NOT NULL,
    notes             TEXT,

    atype             INTEGER  NOT NULL,
    data              TEXT     NOT NULL,
    akey              TEXT     NOT NULL,
    password_hash     BLOB,
    password_salt     BLOB,
    password_iter     INTEGER,

    max_access_count  INTEGER,
    access_count      INTEGER  NOT NULL,

    creation_date     DATETIME NOT NULL,
    revision_date     DATETIME NOT NULL,
    expiration_date   DATETIME,
    deletion_date     DATETIME NOT NULL,

    disabled          BOOLEAN  NOT NULL
);
@@ -0,0 +1 @@
DROP TABLE org_policies;
@@ -0,0 +1,9 @@
CREATE TABLE org_policies (
    uuid     CHAR(36) NOT NULL PRIMARY KEY,
    org_uuid CHAR(36) NOT NULL REFERENCES organizations (uuid),
    atype    INTEGER  NOT NULL,
    enabled  BOOLEAN  NOT NULL,
    data     TEXT     NOT NULL,

    UNIQUE (org_uuid, atype)
);
@@ -0,0 +1 @@

@@ -0,0 +1,3 @@
ALTER TABLE ciphers
ADD COLUMN
    deleted_at TIMESTAMP;
@@ -0,0 +1,2 @@
ALTER TABLE users_collections
ADD COLUMN hide_passwords BOOLEAN NOT NULL DEFAULT FALSE;
@@ -0,0 +1,13 @@
ALTER TABLE ciphers
ADD COLUMN favorite BOOLEAN NOT NULL DEFAULT FALSE;

-- Transfer favorite status for user-owned ciphers.
UPDATE ciphers
SET favorite = TRUE
WHERE EXISTS (
    SELECT * FROM favorites
    WHERE favorites.user_uuid = ciphers.user_uuid
      AND favorites.cipher_uuid = ciphers.uuid
);

DROP TABLE favorites;
@@ -0,0 +1,16 @@
CREATE TABLE favorites (
    user_uuid   VARCHAR(40) NOT NULL REFERENCES users(uuid),
    cipher_uuid VARCHAR(40) NOT NULL REFERENCES ciphers(uuid),

    PRIMARY KEY (user_uuid, cipher_uuid)
);

-- Transfer favorite status for user-owned ciphers.
INSERT INTO favorites(user_uuid, cipher_uuid)
SELECT user_uuid, uuid
FROM ciphers
WHERE favorite = TRUE
  AND user_uuid IS NOT NULL;

ALTER TABLE ciphers
DROP COLUMN favorite;
@@ -0,0 +1 @@
ALTER TABLE users ADD COLUMN enabled BOOLEAN NOT NULL DEFAULT true;
@@ -0,0 +1 @@
ALTER TABLE users ADD COLUMN stamp_exception TEXT DEFAULT NULL;
@@ -0,0 +1 @@
DROP TABLE sends;
25 migrations/postgresql/2021-03-11-190243_add_sends/up.sql Normal file
@@ -0,0 +1,25 @@
CREATE TABLE sends (
    uuid              CHAR(36) NOT NULL PRIMARY KEY,
    user_uuid         CHAR(36) REFERENCES users (uuid),
    organization_uuid CHAR(36) REFERENCES organizations (uuid),

    name              TEXT      NOT NULL,
    notes             TEXT,

    atype             INTEGER   NOT NULL,
    data              TEXT      NOT NULL,
    key               TEXT      NOT NULL,
    password_hash     BYTEA,
    password_salt     BYTEA,
    password_iter     INTEGER,

    max_access_count  INTEGER,
    access_count      INTEGER   NOT NULL,

    creation_date     TIMESTAMP NOT NULL,
    revision_date     TIMESTAMP NOT NULL,
    expiration_date   TIMESTAMP,
    deletion_date     TIMESTAMP NOT NULL,

    disabled          BOOLEAN   NOT NULL
);
@@ -0,0 +1 @@
ALTER TABLE sends RENAME COLUMN key TO akey;
@@ -0,0 +1 @@
|
|||||||
|
DROP TABLE org_policies;
|
@@ -0,0 +1,9 @@
|
|||||||
|
CREATE TABLE org_policies (
|
||||||
|
uuid TEXT NOT NULL PRIMARY KEY,
|
||||||
|
org_uuid TEXT NOT NULL REFERENCES organizations (uuid),
|
||||||
|
atype INTEGER NOT NULL,
|
||||||
|
enabled BOOLEAN NOT NULL,
|
||||||
|
data TEXT NOT NULL,
|
||||||
|
|
||||||
|
UNIQUE (org_uuid, atype)
|
||||||
|
);
|
@@ -0,0 +1 @@
|
|||||||
|
|
@@ -0,0 +1,3 @@
|
|||||||
|
ALTER TABLE ciphers
|
||||||
|
ADD COLUMN
|
||||||
|
deleted_at DATETIME;
|
@@ -0,0 +1,2 @@
|
|||||||
|
ALTER TABLE users_collections
|
||||||
|
ADD COLUMN hide_passwords BOOLEAN NOT NULL DEFAULT 0; -- FALSE
|
@@ -0,0 +1,13 @@
|
|||||||
|
ALTER TABLE ciphers
|
||||||
|
ADD COLUMN favorite BOOLEAN NOT NULL DEFAULT 0; -- FALSE
|
||||||
|
|
||||||
|
-- Transfer favorite status for user-owned ciphers.
|
||||||
|
UPDATE ciphers
|
||||||
|
SET favorite = 1
|
||||||
|
WHERE EXISTS (
|
||||||
|
SELECT * FROM favorites
|
||||||
|
WHERE favorites.user_uuid = ciphers.user_uuid
|
||||||
|
AND favorites.cipher_uuid = ciphers.uuid
|
||||||
|
);
|
||||||
|
|
||||||
|
DROP TABLE favorites;
|
@@ -0,0 +1,71 @@
CREATE TABLE favorites (
  user_uuid   TEXT NOT NULL REFERENCES users(uuid),
  cipher_uuid TEXT NOT NULL REFERENCES ciphers(uuid),

  PRIMARY KEY (user_uuid, cipher_uuid)
);

-- Transfer favorite status for user-owned ciphers.
INSERT INTO favorites(user_uuid, cipher_uuid)
SELECT user_uuid, uuid
FROM ciphers
WHERE favorite = 1
  AND user_uuid IS NOT NULL;

-- Drop the `favorite` column from the `ciphers` table, using the 12-step
-- procedure from <https://www.sqlite.org/lang_altertable.html#altertabrename>.
-- Note that some steps aren't applicable and are omitted.

-- 1. If foreign key constraints are enabled, disable them using PRAGMA foreign_keys=OFF.
--
-- Diesel runs each migration in its own transaction. `PRAGMA foreign_keys`
-- is a no-op within a transaction, so this step must be done outside of this
-- file, before starting the Diesel migrations.

-- 2. Start a transaction.
--
-- Diesel already runs each migration in its own transaction.

-- 4. Use CREATE TABLE to construct a new table "new_X" that is in the
-- desired revised format of table X. Make sure that the name "new_X" does
-- not collide with any existing table name, of course.

CREATE TABLE new_ciphers(
  uuid              TEXT     NOT NULL PRIMARY KEY,
  created_at        DATETIME NOT NULL,
  updated_at        DATETIME NOT NULL,
  user_uuid         TEXT     REFERENCES users(uuid),
  organization_uuid TEXT     REFERENCES organizations(uuid),
  atype             INTEGER  NOT NULL,
  name              TEXT     NOT NULL,
  notes             TEXT,
  fields            TEXT,
  data              TEXT     NOT NULL,
  password_history  TEXT,
  deleted_at        DATETIME
);

-- 5. Transfer content from X into new_X using a statement like:
--    INSERT INTO new_X SELECT ... FROM X.

INSERT INTO new_ciphers(uuid, created_at, updated_at, user_uuid, organization_uuid, atype,
                        name, notes, fields, data, password_history, deleted_at)
SELECT uuid, created_at, updated_at, user_uuid, organization_uuid, atype,
       name, notes, fields, data, password_history, deleted_at
FROM ciphers;

-- 6. Drop the old table X: DROP TABLE X.

DROP TABLE ciphers;

-- 7. Change the name of new_X to X using: ALTER TABLE new_X RENAME TO X.

ALTER TABLE new_ciphers RENAME TO ciphers;

-- 11. Commit the transaction started in step 2.

-- 12. If foreign keys constraints were originally enabled, reenable them now.
--
-- `PRAGMA foreign_keys` is scoped to a database connection, and Diesel
-- migrations are run in a separate database connection that is closed once
-- the migrations finish.
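Steps 1 and 12 above cannot live in the migration file itself, since Diesel wraps each migration in a transaction. A minimal sketch of how the application side could handle them, assuming Diesel 1.x with diesel_migrations and a SQLite database URL (the helper name and migration path are illustrative, not taken from this repository):

use diesel::{connection::SimpleConnection, sqlite::SqliteConnection, Connection};

// Embed the SQLite migrations at compile time (the path is an assumption).
diesel_migrations::embed_migrations!("migrations/sqlite");

// Hypothetical helper: disable foreign key checks on the migration
// connection, run all pending migrations (each in its own transaction),
// then re-enable the checks before the connection is dropped.
fn run_sqlite_migrations(database_url: &str) -> Result<(), Box<dyn std::error::Error>> {
    let conn = SqliteConnection::establish(database_url)?;

    // Step 1: PRAGMA foreign_keys is a no-op inside a transaction, so it runs here first.
    conn.batch_execute("PRAGMA foreign_keys = OFF")?;

    embedded_migrations::run_with_output(&conn, &mut std::io::stdout())?;

    // Step 12: the pragma is scoped to this connection, so this is optional
    // when the connection is closed right after the migrations finish.
    conn.batch_execute("PRAGMA foreign_keys = ON")?;
    Ok(())
}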
@@ -0,0 +1 @@
ALTER TABLE users ADD COLUMN enabled BOOLEAN NOT NULL DEFAULT 1;
@@ -0,0 +1 @@
ALTER TABLE users ADD COLUMN stamp_exception TEXT DEFAULT NULL;
1  migrations/sqlite/2021-03-11-190243_add_sends/down.sql  Normal file
@@ -0,0 +1 @@
DROP TABLE sends;
25  migrations/sqlite/2021-03-11-190243_add_sends/up.sql  Normal file
@@ -0,0 +1,25 @@
CREATE TABLE sends (
  uuid              TEXT     NOT NULL PRIMARY KEY,
  user_uuid         TEXT     REFERENCES users (uuid),
  organization_uuid TEXT     REFERENCES organizations (uuid),

  name              TEXT     NOT NULL,
  notes             TEXT,

  atype             INTEGER  NOT NULL,
  data              TEXT     NOT NULL,
  key               TEXT     NOT NULL,
  password_hash     BLOB,
  password_salt     BLOB,
  password_iter     INTEGER,

  max_access_count  INTEGER,
  access_count      INTEGER  NOT NULL,

  creation_date     DATETIME NOT NULL,
  revision_date     DATETIME NOT NULL,
  expiration_date   DATETIME,
  deletion_date     DATETIME NOT NULL,

  disabled          BOOLEAN  NOT NULL
);
@@ -0,0 +1 @@
ALTER TABLE sends RENAME COLUMN key TO akey;
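For orientation, a Diesel table! fragment matching the sends table above might look like the sketch below. This is illustrative only and not quoted from the repository's schema.rs; note that the follow-up migration renames key to akey, which is already reflected here.

diesel::table! {
    // Hypothetical schema fragment mirroring the SQLite migration above.
    sends (uuid) {
        uuid -> Text,
        user_uuid -> Nullable<Text>,
        organization_uuid -> Nullable<Text>,
        name -> Text,
        notes -> Nullable<Text>,
        atype -> Integer,
        data -> Text,
        akey -> Text,
        password_hash -> Nullable<Binary>,
        password_salt -> Nullable<Binary>,
        password_iter -> Nullable<Integer>,
        max_access_count -> Nullable<Integer>,
        access_count -> Integer,
        creation_date -> Timestamp,
        revision_date -> Timestamp,
        expiration_date -> Nullable<Timestamp>,
        deletion_date -> Timestamp,
        disabled -> Bool,
    }
}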
@@ -1 +1 @@
-nightly-2019-11-17
+nightly-2021-02-22
@@ -1 +1,2 @@
+version = "Two"
 max_width = 120
482  src/api/admin.rs
@@ -1,44 +1,75 @@
+use once_cell::sync::Lazy;
+use serde::de::DeserializeOwned;
 use serde_json::Value;
-use std::process::Command;
+use std::{env, process::Command, time::Duration};
 
-use rocket::http::{Cookie, Cookies, SameSite};
-use rocket::request::{self, FlashMessage, Form, FromRequest, Request};
-use rocket::response::{content::Html, Flash, Redirect};
-use rocket::{Outcome, Route};
+use reqwest::{blocking::Client, header::USER_AGENT};
+use rocket::{
+    http::{Cookie, Cookies, SameSite},
+    request::{self, FlashMessage, Form, FromRequest, Outcome, Request},
+    response::{content::Html, Flash, Redirect},
+    Route,
+};
 use rocket_contrib::json::Json;
 
-use crate::api::{ApiResult, EmptyResult, JsonResult};
-use crate::auth::{decode_admin, encode_jwt, generate_admin_claims, ClientIp};
-use crate::config::ConfigBuilder;
-use crate::db::{backup_database, models::*, DbConn};
-use crate::error::Error;
-use crate::mail;
-use crate::CONFIG;
+use crate::{
+    api::{ApiResult, EmptyResult, NumberOrString},
+    auth::{decode_admin, encode_jwt, generate_admin_claims, ClientIp},
+    config::ConfigBuilder,
+    db::{backup_database, get_sql_server_version, models::*, DbConn, DbConnType},
+    error::{Error, MapResult},
+    mail,
+    util::{format_naive_datetime_local, get_display_size, is_running_in_docker},
+    CONFIG,
+};
 
 pub fn routes() -> Vec<Route> {
-    if CONFIG.admin_token().is_none() && !CONFIG.disable_admin_token() {
+    if !CONFIG.disable_admin_token() && !CONFIG.is_admin_token_set() {
         return routes![admin_disabled];
     }
 
     routes![
         admin_login,
-        get_users,
+        get_users_json,
         post_admin_login,
         admin_page,
         invite_user,
+        logout,
         delete_user,
         deauth_user,
+        disable_user,
+        enable_user,
         remove_2fa,
+        update_user_org_type,
         update_revision_users,
         post_config,
         delete_config,
         backup_db,
+        test_smtp,
+        users_overview,
+        organizations_overview,
+        delete_organization,
+        diagnostics,
+        get_diagnostics_config
     ]
 }
 
-lazy_static! {
-    static ref CAN_BACKUP: bool = cfg!(feature = "sqlite") && Command::new("sqlite3").arg("-version").status().is_ok();
-}
+static DB_TYPE: Lazy<&str> = Lazy::new(|| {
+    DbConnType::from_url(&CONFIG.database_url())
+        .map(|t| match t {
+            DbConnType::sqlite => "SQLite",
+            DbConnType::mysql => "MySQL",
+            DbConnType::postgresql => "PostgreSQL",
+        })
+        .unwrap_or("Unknown")
+});
+
+static CAN_BACKUP: Lazy<bool> = Lazy::new(|| {
+    DbConnType::from_url(&CONFIG.database_url())
+        .map(|t| t == DbConnType::sqlite)
+        .unwrap_or(false)
+        && Command::new("sqlite3").arg("-version").status().is_ok()
+});
 
 #[get("/")]
 fn admin_disabled() -> &'static str {
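The hunk above swaps lazy_static! for once_cell::sync::Lazy; both defer the initializer until first use and cache the result. A small stand-alone sketch of the pattern, with the static and its value made up for illustration:

use once_cell::sync::Lazy;

// Illustrative static: the closure runs exactly once, on first dereference.
static BACKEND_NAME: Lazy<String> = Lazy::new(|| {
    // Imagine an expensive probe here (parsing a URL, spawning `sqlite3 -version`, ...).
    String::from("SQLite")
});

fn main() {
    println!("db backend: {}", *BACKEND_NAME); // triggers initialization
    println!("db backend: {}", *BACKEND_NAME); // reuses the cached value
}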
@@ -49,13 +80,69 @@ const COOKIE_NAME: &str = "BWRS_ADMIN";
 const ADMIN_PATH: &str = "/admin";
 
 const BASE_TEMPLATE: &str = "admin/base";
-const VERSION: Option<&str> = option_env!("GIT_VERSION");
+const VERSION: Option<&str> = option_env!("BWRS_VERSION");
+
+fn admin_path() -> String {
+    format!("{}{}", CONFIG.domain_path(), ADMIN_PATH)
+}
+
+struct Referer(Option<String>);
+
+impl<'a, 'r> FromRequest<'a, 'r> for Referer {
+    type Error = ();
+
+    fn from_request(request: &'a Request<'r>) -> request::Outcome<Self, Self::Error> {
+        Outcome::Success(Referer(request.headers().get_one("Referer").map(str::to_string)))
+    }
+}
+
+#[derive(Debug)]
+struct IpHeader(Option<String>);
+
+impl<'a, 'r> FromRequest<'a, 'r> for IpHeader {
+    type Error = ();
+
+    fn from_request(req: &'a Request<'r>) -> Outcome<Self, Self::Error> {
+        if req.headers().get_one(&CONFIG.ip_header()).is_some() {
+            Outcome::Success(IpHeader(Some(CONFIG.ip_header())))
+        } else if req.headers().get_one("X-Client-IP").is_some() {
+            Outcome::Success(IpHeader(Some(String::from("X-Client-IP"))))
+        } else if req.headers().get_one("X-Real-IP").is_some() {
+            Outcome::Success(IpHeader(Some(String::from("X-Real-IP"))))
+        } else if req.headers().get_one("X-Forwarded-For").is_some() {
+            Outcome::Success(IpHeader(Some(String::from("X-Forwarded-For"))))
+        } else {
+            Outcome::Success(IpHeader(None))
+        }
+    }
+}
+
+/// Used for `Location` response headers, which must specify an absolute URI
+/// (see https://tools.ietf.org/html/rfc2616#section-14.30).
+fn admin_url(referer: Referer) -> String {
+    // If we got a referer, use that to make it work when DOMAIN is not set
+    if let Some(mut referer) = referer.0 {
+        if let Some(start_index) = referer.find(ADMIN_PATH) {
+            referer.truncate(start_index + ADMIN_PATH.len());
+            return referer;
+        }
+    }
+
+    if CONFIG.domain_set() {
+        // Don't use CONFIG.domain() directly, since the user may want to keep a
+        // trailing slash there, particularly when running under a subpath.
+        format!("{}{}{}", CONFIG.domain_origin(), CONFIG.domain_path(), ADMIN_PATH)
+    } else {
+        // Last case, when no referer or domain set; technically invalid but better than nothing
+        ADMIN_PATH.to_string()
+    }
+}
 
 #[get("/", rank = 2)]
 fn admin_login(flash: Option<FlashMessage>) -> ApiResult<Html<String>> {
     // If there is an error, show it
     let msg = flash.map(|msg| format!("{}: {}", msg.name(), msg.msg()));
-    let json = json!({"page_content": "admin/login", "version": VERSION, "error": msg});
+    let json = json!({"page_content": "admin/login", "version": VERSION, "error": msg, "urlpath": CONFIG.domain_path()});
 
     // Return the page
     let text = CONFIG.render_template(BASE_TEMPLATE, &json)?;
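The admin_url fallback above trims the Referer header back to the admin root, so a form posted from any admin sub-page still redirects to /admin even when DOMAIN is unset. A stand-alone illustration of that truncation, with the URL and helper name invented for the example:

const ADMIN_PATH: &str = "/admin";

// Same truncation logic as admin_url, isolated so it can be exercised directly.
fn truncate_to_admin(mut referer: String) -> String {
    if let Some(start_index) = referer.find(ADMIN_PATH) {
        referer.truncate(start_index + ADMIN_PATH.len());
    }
    referer
}

fn main() {
    let from_subpage = String::from("https://vault.example.com/admin/users/overview");
    assert_eq!(truncate_to_admin(from_subpage), "https://vault.example.com/admin");
}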
@@ -68,14 +155,19 @@ struct LoginForm {
 }
 
 #[post("/", data = "<data>")]
-fn post_admin_login(data: Form<LoginForm>, mut cookies: Cookies, ip: ClientIp) -> Result<Redirect, Flash<Redirect>> {
+fn post_admin_login(
+    data: Form<LoginForm>,
+    mut cookies: Cookies,
+    ip: ClientIp,
+    referer: Referer,
+) -> Result<Redirect, Flash<Redirect>> {
     let data = data.into_inner();
 
     // If the token is invalid, redirect to login page
     if !_validate_token(&data.token) {
         error!("Invalid admin token. IP: {}", ip.ip);
         Err(Flash::error(
-            Redirect::to(ADMIN_PATH),
+            Redirect::to(admin_url(referer)),
             "Invalid admin token, please try again.",
         ))
     } else {
@@ -84,14 +176,14 @@ fn post_admin_login(data: Form<LoginForm>, mut cookies: Cookies, ip: ClientIp) -
         let jwt = encode_jwt(&claims);
 
         let cookie = Cookie::build(COOKIE_NAME, jwt)
-            .path(ADMIN_PATH)
-            .max_age(chrono::Duration::minutes(20))
+            .path(admin_path())
+            .max_age(time::Duration::minutes(20))
             .same_site(SameSite::Strict)
             .http_only(true)
             .finish();
 
         cookies.add(cookie);
-        Ok(Redirect::to(ADMIN_PATH))
+        Ok(Redirect::to(admin_url(referer)))
     }
 }
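The .max_age() argument changes from chrono::Duration to time::Duration here; Rocket 0.4 cookies are backed by the cookie crate, which expects the time crate's duration type. A minimal sketch of an equivalent builder call using those crates directly, outside Rocket, with names assumed:

use cookie::{Cookie, SameSite};
use time::Duration;

// Sketch: a short-lived, strict-same-site, HttpOnly admin session cookie.
fn build_admin_cookie(jwt: String) -> Cookie<'static> {
    Cookie::build("BWRS_ADMIN", jwt)
        .path("/admin")
        .max_age(Duration::minutes(20))
        .same_site(SameSite::Strict)
        .http_only(true)
        .finish()
}

fn main() {
    println!("{}", build_admin_cookie(String::from("example-token")));
}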
@@ -106,19 +198,69 @@ fn _validate_token(token: &str) -> bool {
 struct AdminTemplateData {
     page_content: String,
     version: Option<&'static str>,
-    users: Vec<Value>,
+    users: Option<Vec<Value>>,
+    organizations: Option<Vec<Value>>,
+    diagnostics: Option<Value>,
     config: Value,
     can_backup: bool,
+    logged_in: bool,
+    urlpath: String,
 }
 
 impl AdminTemplateData {
-    fn new(users: Vec<Value>) -> Self {
+    fn new() -> Self {
         Self {
-            page_content: String::from("admin/page"),
+            page_content: String::from("admin/settings"),
             version: VERSION,
-            users,
             config: CONFIG.prepare_json(),
             can_backup: *CAN_BACKUP,
+            logged_in: true,
+            urlpath: CONFIG.domain_path(),
+            users: None,
+            organizations: None,
+            diagnostics: None,
+        }
+    }
+
+    fn users(users: Vec<Value>) -> Self {
+        Self {
+            page_content: String::from("admin/users"),
+            version: VERSION,
+            users: Some(users),
+            config: CONFIG.prepare_json(),
+            can_backup: *CAN_BACKUP,
+            logged_in: true,
+            urlpath: CONFIG.domain_path(),
+            organizations: None,
+            diagnostics: None,
+        }
+    }
+
+    fn organizations(organizations: Vec<Value>) -> Self {
+        Self {
+            page_content: String::from("admin/organizations"),
+            version: VERSION,
+            organizations: Some(organizations),
+            config: CONFIG.prepare_json(),
+            can_backup: *CAN_BACKUP,
+            logged_in: true,
+            urlpath: CONFIG.domain_path(),
+            users: None,
+            diagnostics: None,
+        }
+    }
+
+    fn diagnostics(diagnostics: Value) -> Self {
+        Self {
+            page_content: String::from("admin/diagnostics"),
+            version: VERSION,
+            organizations: None,
+            config: CONFIG.prepare_json(),
+            can_backup: *CAN_BACKUP,
+            logged_in: true,
+            urlpath: CONFIG.domain_path(),
+            users: None,
+            diagnostics: Some(diagnostics),
         }
     }
@@ -128,11 +270,8 @@ impl AdminTemplateData {
 }
 
 #[get("/", rank = 1)]
-fn admin_page(_token: AdminToken, conn: DbConn) -> ApiResult<Html<String>> {
-    let users = User::get_all(&conn);
-    let users_json: Vec<Value> = users.iter().map(|u| u.to_json(&conn)).collect();
-
-    let text = AdminTemplateData::new(users_json).render()?;
+fn admin_page(_token: AdminToken, _conn: DbConn) -> ApiResult<Html<String>> {
+    let text = AdminTemplateData::new().render()?;
     Ok(Html(text))
 }
@@ -150,70 +289,301 @@ fn invite_user(data: Json<InviteData>, _token: AdminToken, conn: DbConn) -> Empt
|
|||||||
err!("User already exists")
|
err!("User already exists")
|
||||||
}
|
}
|
||||||
|
|
||||||
if !CONFIG.invitations_allowed() {
|
|
||||||
err!("Invitations are not allowed")
|
|
||||||
}
|
|
||||||
|
|
||||||
let mut user = User::new(email);
|
let mut user = User::new(email);
|
||||||
user.save(&conn)?;
|
user.save(&conn)?;
|
||||||
|
|
||||||
if CONFIG.mail_enabled() {
|
if CONFIG.mail_enabled() {
|
||||||
let org_name = "bitwarden_rs";
|
mail::send_invite(&user.email, &user.uuid, None, None, &CONFIG.invitation_org_name(), None)
|
||||||
mail::send_invite(&user.email, &user.uuid, None, None, &org_name, None)
|
|
||||||
} else {
|
} else {
|
||||||
let invitation = Invitation::new(data.email);
|
let invitation = Invitation::new(data.email);
|
||||||
invitation.save(&conn)
|
invitation.save(&conn)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[post("/test/smtp", data = "<data>")]
|
||||||
|
fn test_smtp(data: Json<InviteData>, _token: AdminToken) -> EmptyResult {
|
||||||
|
let data: InviteData = data.into_inner();
|
||||||
|
|
||||||
|
if CONFIG.mail_enabled() {
|
||||||
|
mail::send_test(&data.email)
|
||||||
|
} else {
|
||||||
|
err!("Mail is not enabled")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[get("/logout")]
|
||||||
|
fn logout(mut cookies: Cookies, referer: Referer) -> Redirect {
|
||||||
|
cookies.remove(Cookie::named(COOKIE_NAME));
|
||||||
|
Redirect::to(admin_url(referer))
|
||||||
|
}
|
||||||
|
|
||||||
#[get("/users")]
|
#[get("/users")]
|
||||||
fn get_users(_token: AdminToken, conn: DbConn) -> JsonResult {
|
fn get_users_json(_token: AdminToken, conn: DbConn) -> Json<Value> {
|
||||||
let users = User::get_all(&conn);
|
let users = User::get_all(&conn);
|
||||||
let users_json: Vec<Value> = users.iter().map(|u| u.to_json(&conn)).collect();
|
let users_json: Vec<Value> = users.iter().map(|u| u.to_json(&conn)).collect();
|
||||||
|
|
||||||
Ok(Json(Value::Array(users_json)))
|
Json(Value::Array(users_json))
|
||||||
|
}
|
||||||
|
|
||||||
|
#[get("/users/overview")]
|
||||||
|
fn users_overview(_token: AdminToken, conn: DbConn) -> ApiResult<Html<String>> {
|
||||||
|
let users = User::get_all(&conn);
|
||||||
|
let dt_fmt = "%Y-%m-%d %H:%M:%S %Z";
|
||||||
|
let users_json: Vec<Value> = users.iter()
|
||||||
|
.map(|u| {
|
||||||
|
let mut usr = u.to_json(&conn);
|
||||||
|
usr["cipher_count"] = json!(Cipher::count_owned_by_user(&u.uuid, &conn));
|
||||||
|
usr["attachment_count"] = json!(Attachment::count_by_user(&u.uuid, &conn));
|
||||||
|
usr["attachment_size"] = json!(get_display_size(Attachment::size_by_user(&u.uuid, &conn) as i32));
|
||||||
|
usr["user_enabled"] = json!(u.enabled);
|
||||||
|
usr["created_at"] = json!(format_naive_datetime_local(&u.created_at, dt_fmt));
|
||||||
|
usr["last_active"] = match u.last_active(&conn) {
|
||||||
|
Some(dt) => json!(format_naive_datetime_local(&dt, dt_fmt)),
|
||||||
|
None => json!("Never")
|
||||||
|
};
|
||||||
|
usr
|
||||||
|
})
|
||||||
|
.collect();
|
||||||
|
|
||||||
|
let text = AdminTemplateData::users(users_json).render()?;
|
||||||
|
Ok(Html(text))
|
||||||
}
|
}
|
||||||
|
|
||||||
#[post("/users/<uuid>/delete")]
|
#[post("/users/<uuid>/delete")]
|
||||||
fn delete_user(uuid: String, _token: AdminToken, conn: DbConn) -> EmptyResult {
|
fn delete_user(uuid: String, _token: AdminToken, conn: DbConn) -> EmptyResult {
|
||||||
let user = match User::find_by_uuid(&uuid, &conn) {
|
let user = User::find_by_uuid(&uuid, &conn).map_res("User doesn't exist")?;
|
||||||
Some(user) => user,
|
|
||||||
None => err!("User doesn't exist"),
|
|
||||||
};
|
|
||||||
|
|
||||||
user.delete(&conn)
|
user.delete(&conn)
|
||||||
}
|
}
|
||||||
|
|
||||||
#[post("/users/<uuid>/deauth")]
|
#[post("/users/<uuid>/deauth")]
|
||||||
fn deauth_user(uuid: String, _token: AdminToken, conn: DbConn) -> EmptyResult {
|
fn deauth_user(uuid: String, _token: AdminToken, conn: DbConn) -> EmptyResult {
|
||||||
let mut user = match User::find_by_uuid(&uuid, &conn) {
|
let mut user = User::find_by_uuid(&uuid, &conn).map_res("User doesn't exist")?;
|
||||||
Some(user) => user,
|
|
||||||
None => err!("User doesn't exist"),
|
|
||||||
};
|
|
||||||
|
|
||||||
Device::delete_all_by_user(&user.uuid, &conn)?;
|
Device::delete_all_by_user(&user.uuid, &conn)?;
|
||||||
user.reset_security_stamp();
|
user.reset_security_stamp();
|
||||||
|
|
||||||
user.save(&conn)
|
user.save(&conn)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[post("/users/<uuid>/disable")]
|
||||||
|
fn disable_user(uuid: String, _token: AdminToken, conn: DbConn) -> EmptyResult {
|
||||||
|
let mut user = User::find_by_uuid(&uuid, &conn).map_res("User doesn't exist")?;
|
||||||
|
Device::delete_all_by_user(&user.uuid, &conn)?;
|
||||||
|
user.reset_security_stamp();
|
||||||
|
user.enabled = false;
|
||||||
|
|
||||||
|
user.save(&conn)
|
||||||
|
}
|
||||||
|
|
||||||
|
#[post("/users/<uuid>/enable")]
|
||||||
|
fn enable_user(uuid: String, _token: AdminToken, conn: DbConn) -> EmptyResult {
|
||||||
|
let mut user = User::find_by_uuid(&uuid, &conn).map_res("User doesn't exist")?;
|
||||||
|
user.enabled = true;
|
||||||
|
|
||||||
|
user.save(&conn)
|
||||||
|
}
|
||||||
|
|
||||||
#[post("/users/<uuid>/remove-2fa")]
|
#[post("/users/<uuid>/remove-2fa")]
|
||||||
fn remove_2fa(uuid: String, _token: AdminToken, conn: DbConn) -> EmptyResult {
|
fn remove_2fa(uuid: String, _token: AdminToken, conn: DbConn) -> EmptyResult {
|
||||||
let mut user = match User::find_by_uuid(&uuid, &conn) {
|
let mut user = User::find_by_uuid(&uuid, &conn).map_res("User doesn't exist")?;
|
||||||
Some(user) => user,
|
|
||||||
None => err!("User doesn't exist"),
|
|
||||||
};
|
|
||||||
|
|
||||||
TwoFactor::delete_all_by_user(&user.uuid, &conn)?;
|
TwoFactor::delete_all_by_user(&user.uuid, &conn)?;
|
||||||
user.totp_recover = None;
|
user.totp_recover = None;
|
||||||
user.save(&conn)
|
user.save(&conn)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[derive(Deserialize, Debug)]
|
||||||
|
struct UserOrgTypeData {
|
||||||
|
user_type: NumberOrString,
|
||||||
|
user_uuid: String,
|
||||||
|
org_uuid: String,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[post("/users/org_type", data = "<data>")]
|
||||||
|
fn update_user_org_type(data: Json<UserOrgTypeData>, _token: AdminToken, conn: DbConn) -> EmptyResult {
|
||||||
|
let data: UserOrgTypeData = data.into_inner();
|
||||||
|
|
||||||
|
let mut user_to_edit = match UserOrganization::find_by_user_and_org(&data.user_uuid, &data.org_uuid, &conn) {
|
||||||
|
Some(user) => user,
|
||||||
|
None => err!("The specified user isn't member of the organization"),
|
||||||
|
};
|
||||||
|
|
||||||
|
let new_type = match UserOrgType::from_str(&data.user_type.into_string()) {
|
||||||
|
Some(new_type) => new_type as i32,
|
||||||
|
None => err!("Invalid type"),
|
||||||
|
};
|
||||||
|
|
||||||
|
if user_to_edit.atype == UserOrgType::Owner && new_type != UserOrgType::Owner {
|
||||||
|
// Removing owner permmission, check that there are at least another owner
|
||||||
|
let num_owners = UserOrganization::find_by_org_and_type(&data.org_uuid, UserOrgType::Owner as i32, &conn).len();
|
||||||
|
|
||||||
|
if num_owners <= 1 {
|
||||||
|
err!("Can't change the type of the last owner")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
user_to_edit.atype = new_type as i32;
|
||||||
|
user_to_edit.save(&conn)
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
#[post("/users/update_revision")]
|
#[post("/users/update_revision")]
|
||||||
fn update_revision_users(_token: AdminToken, conn: DbConn) -> EmptyResult {
|
fn update_revision_users(_token: AdminToken, conn: DbConn) -> EmptyResult {
|
||||||
User::update_all_revisions(&conn)
|
User::update_all_revisions(&conn)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[get("/organizations/overview")]
|
||||||
|
fn organizations_overview(_token: AdminToken, conn: DbConn) -> ApiResult<Html<String>> {
|
||||||
|
let organizations = Organization::get_all(&conn);
|
||||||
|
let organizations_json: Vec<Value> = organizations.iter()
|
||||||
|
.map(|o| {
|
||||||
|
let mut org = o.to_json();
|
||||||
|
org["user_count"] = json!(UserOrganization::count_by_org(&o.uuid, &conn));
|
||||||
|
org["cipher_count"] = json!(Cipher::count_by_org(&o.uuid, &conn));
|
||||||
|
org["attachment_count"] = json!(Attachment::count_by_org(&o.uuid, &conn));
|
||||||
|
org["attachment_size"] = json!(get_display_size(Attachment::size_by_org(&o.uuid, &conn) as i32));
|
||||||
|
org
|
||||||
|
})
|
||||||
|
.collect();
|
||||||
|
|
||||||
|
let text = AdminTemplateData::organizations(organizations_json).render()?;
|
||||||
|
Ok(Html(text))
|
||||||
|
}
|
||||||
|
|
||||||
|
#[post("/organizations/<uuid>/delete")]
|
||||||
|
fn delete_organization(uuid: String, _token: AdminToken, conn: DbConn) -> EmptyResult {
|
||||||
|
let org = Organization::find_by_uuid(&uuid, &conn).map_res("Organization doesn't exist")?;
|
||||||
|
org.delete(&conn)
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Deserialize)]
|
||||||
|
struct WebVaultVersion {
|
||||||
|
version: String,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Deserialize)]
|
||||||
|
struct GitRelease {
|
||||||
|
tag_name: String,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Deserialize)]
|
||||||
|
struct GitCommit {
|
||||||
|
sha: String,
|
||||||
|
}
|
||||||
|
|
||||||
|
fn get_github_api<T: DeserializeOwned>(url: &str) -> Result<T, Error> {
|
||||||
|
let github_api = Client::builder().build()?;
|
||||||
|
|
||||||
|
Ok(github_api
|
||||||
|
.get(url)
|
||||||
|
.timeout(Duration::from_secs(10))
|
||||||
|
.header(USER_AGENT, "Bitwarden_RS")
|
||||||
|
.send()?
|
||||||
|
.error_for_status()?
|
||||||
|
.json::<T>()?)
|
||||||
|
}
|
||||||
|
|
||||||
|
fn has_http_access() -> bool {
|
||||||
|
let http_access = Client::builder().build().unwrap();
|
||||||
|
|
||||||
|
match http_access
|
||||||
|
.head("https://github.com/dani-garcia/bitwarden_rs")
|
||||||
|
.timeout(Duration::from_secs(10))
|
||||||
|
.header(USER_AGENT, "Bitwarden_RS")
|
||||||
|
.send()
|
||||||
|
{
|
||||||
|
Ok(r) => r.status().is_success(),
|
||||||
|
_ => false,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[get("/diagnostics")]
|
||||||
|
fn diagnostics(_token: AdminToken, ip_header: IpHeader, conn: DbConn) -> ApiResult<Html<String>> {
|
||||||
|
use crate::util::read_file_string;
|
||||||
|
use chrono::prelude::*;
|
||||||
|
use std::net::ToSocketAddrs;
|
||||||
|
|
||||||
|
// Get current running versions
|
||||||
|
let vault_version_path = format!("{}/{}", CONFIG.web_vault_folder(), "version.json");
|
||||||
|
let vault_version_str = read_file_string(&vault_version_path)?;
|
||||||
|
let web_vault_version: WebVaultVersion = serde_json::from_str(&vault_version_str)?;
|
||||||
|
|
||||||
|
// Execute some environment checks
|
||||||
|
let running_within_docker = is_running_in_docker();
|
||||||
|
let has_http_access = has_http_access();
|
||||||
|
let uses_proxy = env::var_os("HTTP_PROXY").is_some()
|
||||||
|
|| env::var_os("http_proxy").is_some()
|
||||||
|
|| env::var_os("HTTPS_PROXY").is_some()
|
||||||
|
|| env::var_os("https_proxy").is_some();
|
||||||
|
|
||||||
|
// Check if we are able to resolve DNS entries
|
||||||
|
let dns_resolved = match ("github.com", 0).to_socket_addrs().map(|mut i| i.next()) {
|
||||||
|
Ok(Some(a)) => a.ip().to_string(),
|
||||||
|
_ => "Could not resolve domain name.".to_string(),
|
||||||
|
};
|
||||||
|
|
||||||
|
// If the HTTP Check failed, do not even attempt to check for new versions since we were not able to connect with github.com anyway.
|
||||||
|
// TODO: Maybe we need to cache this using a LazyStatic or something. Github only allows 60 requests per hour, and we use 3 here already.
|
||||||
|
let (latest_release, latest_commit, latest_web_build) = if has_http_access {
|
||||||
|
(
|
||||||
|
match get_github_api::<GitRelease>("https://api.github.com/repos/dani-garcia/bitwarden_rs/releases/latest") {
|
||||||
|
Ok(r) => r.tag_name,
|
||||||
|
_ => "-".to_string(),
|
||||||
|
},
|
||||||
|
match get_github_api::<GitCommit>("https://api.github.com/repos/dani-garcia/bitwarden_rs/commits/master") {
|
||||||
|
Ok(mut c) => {
|
||||||
|
c.sha.truncate(8);
|
||||||
|
c.sha
|
||||||
|
}
|
||||||
|
_ => "-".to_string(),
|
||||||
|
},
|
||||||
|
// Do not fetch the web-vault version when running within Docker.
|
||||||
|
// The web-vault version is embedded within the container it self, and should not be updated manually
|
||||||
|
if running_within_docker {
|
||||||
|
"-".to_string()
|
||||||
|
} else {
|
||||||
|
match get_github_api::<GitRelease>("https://api.github.com/repos/dani-garcia/bw_web_builds/releases/latest") {
|
||||||
|
Ok(r) => r.tag_name.trim_start_matches('v').to_string(),
|
||||||
|
_ => "-".to_string(),
|
||||||
|
}
|
||||||
|
},
|
||||||
|
)
|
||||||
|
} else {
|
||||||
|
("-".to_string(), "-".to_string(), "-".to_string())
|
||||||
|
};
|
||||||
|
|
||||||
|
let ip_header_name = match &ip_header.0 {
|
||||||
|
Some(h) => h,
|
||||||
|
_ => ""
|
||||||
|
};
|
||||||
|
|
||||||
|
let diagnostics_json = json!({
|
||||||
|
"dns_resolved": dns_resolved,
|
||||||
|
"web_vault_version": web_vault_version.version,
|
||||||
|
"latest_release": latest_release,
|
||||||
|
"latest_commit": latest_commit,
|
||||||
|
"latest_web_build": latest_web_build,
|
||||||
|
"running_within_docker": running_within_docker,
|
||||||
|
"has_http_access": has_http_access,
|
||||||
|
"ip_header_exists": &ip_header.0.is_some(),
|
||||||
|
"ip_header_match": ip_header_name == CONFIG.ip_header(),
|
||||||
|
"ip_header_name": ip_header_name,
|
||||||
|
"ip_header_config": &CONFIG.ip_header(),
|
||||||
|
"uses_proxy": uses_proxy,
|
||||||
|
"db_type": *DB_TYPE,
|
||||||
|
"db_version": get_sql_server_version(&conn),
|
||||||
|
"admin_url": format!("{}/diagnostics", admin_url(Referer(None))),
|
||||||
|
"server_time": Utc::now().format("%Y-%m-%d %H:%M:%S UTC").to_string(), // Run the date/time check as the last item to minimize the difference
|
||||||
|
});
|
||||||
|
|
||||||
|
let text = AdminTemplateData::diagnostics(diagnostics_json).render()?;
|
||||||
|
Ok(Html(text))
|
||||||
|
}
|
||||||
|
|
||||||
|
#[get("/diagnostics/config")]
|
||||||
|
fn get_diagnostics_config(_token: AdminToken) -> Json<Value> {
|
||||||
|
let support_json = CONFIG.get_support_json();
|
||||||
|
Json(support_json)
|
||||||
|
}
|
||||||
|
|
||||||
#[post("/config", data = "<data>")]
|
#[post("/config", data = "<data>")]
|
||||||
fn post_config(data: Json<ConfigBuilder>, _token: AdminToken) -> EmptyResult {
|
fn post_config(data: Json<ConfigBuilder>, _token: AdminToken) -> EmptyResult {
|
||||||
let data: ConfigBuilder = data.into_inner();
|
let data: ConfigBuilder = data.into_inner();
|
||||||
|
@@ -1,19 +1,16 @@
|
|||||||
use rocket_contrib::json::Json;
|
|
||||||
use chrono::Utc;
|
use chrono::Utc;
|
||||||
|
use rocket_contrib::json::Json;
|
||||||
|
use serde_json::Value;
|
||||||
|
|
||||||
use crate::db::models::*;
|
use crate::{
|
||||||
use crate::db::DbConn;
|
api::{EmptyResult, JsonResult, JsonUpcase, Notify, NumberOrString, PasswordData, UpdateType},
|
||||||
|
auth::{decode_delete, decode_invite, decode_verify_email, Headers},
|
||||||
|
crypto,
|
||||||
|
db::{models::*, DbConn},
|
||||||
|
mail, CONFIG,
|
||||||
|
};
|
||||||
|
|
||||||
use crate::api::{EmptyResult, JsonResult, JsonUpcase, Notify, NumberOrString, PasswordData, UpdateType};
|
pub fn routes() -> Vec<rocket::Route> {
|
||||||
use crate::auth::{decode_invite, decode_delete, decode_verify_email, Headers};
|
|
||||||
use crate::mail;
|
|
||||||
use crate::crypto;
|
|
||||||
|
|
||||||
use crate::CONFIG;
|
|
||||||
|
|
||||||
use rocket::Route;
|
|
||||||
|
|
||||||
pub fn routes() -> Vec<Route> {
|
|
||||||
routes![
|
routes![
|
||||||
register,
|
register,
|
||||||
profile,
|
profile,
|
||||||
@@ -36,6 +33,7 @@ pub fn routes() -> Vec<Route> {
|
|||||||
revision_date,
|
revision_date,
|
||||||
password_hint,
|
password_hint,
|
||||||
prelogin,
|
prelogin,
|
||||||
|
verify_password,
|
||||||
]
|
]
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -68,7 +66,7 @@ fn register(data: JsonUpcase<RegisterData>, conn: DbConn) -> EmptyResult {
|
|||||||
let mut user = match User::find_by_mail(&data.Email, &conn) {
|
let mut user = match User::find_by_mail(&data.Email, &conn) {
|
||||||
Some(user) => {
|
Some(user) => {
|
||||||
if !user.password_hash.is_empty() {
|
if !user.password_hash.is_empty() {
|
||||||
if CONFIG.signups_allowed() {
|
if CONFIG.is_signup_allowed(&data.Email) {
|
||||||
err!("User already exists")
|
err!("User already exists")
|
||||||
} else {
|
} else {
|
||||||
err!("Registration not allowed or user already exists")
|
err!("Registration not allowed or user already exists")
|
||||||
@@ -89,14 +87,17 @@ fn register(data: JsonUpcase<RegisterData>, conn: DbConn) -> EmptyResult {
|
|||||||
}
|
}
|
||||||
|
|
||||||
user
|
user
|
||||||
} else if CONFIG.signups_allowed() {
|
} else if CONFIG.is_signup_allowed(&data.Email) {
|
||||||
err!("Account with this email already exists")
|
err!("Account with this email already exists")
|
||||||
} else {
|
} else {
|
||||||
err!("Registration not allowed or user already exists")
|
err!("Registration not allowed or user already exists")
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
None => {
|
None => {
|
||||||
if CONFIG.signups_allowed() || Invitation::take(&data.Email, &conn) || CONFIG.can_signup_user(&data.Email) {
|
// Order is important here; the invitation check must come first
|
||||||
|
// because the bitwarden_rs admin can invite anyone, regardless
|
||||||
|
// of other signup restrictions.
|
||||||
|
if Invitation::take(&data.Email, &conn) || CONFIG.is_signup_allowed(&data.Email) {
|
||||||
User::new(data.Email.clone())
|
User::new(data.Email.clone())
|
||||||
} else {
|
} else {
|
||||||
err!("Registration not allowed or user already exists")
|
err!("Registration not allowed or user already exists")
|
||||||
@@ -115,7 +116,7 @@ fn register(data: JsonUpcase<RegisterData>, conn: DbConn) -> EmptyResult {
|
|||||||
user.client_kdf_type = client_kdf_type;
|
user.client_kdf_type = client_kdf_type;
|
||||||
}
|
}
|
||||||
|
|
||||||
user.set_password(&data.MasterPasswordHash);
|
user.set_password(&data.MasterPasswordHash, None);
|
||||||
user.akey = data.Key;
|
user.akey = data.Key;
|
||||||
|
|
||||||
// Add extra fields if present
|
// Add extra fields if present
|
||||||
@@ -139,19 +140,17 @@ fn register(data: JsonUpcase<RegisterData>, conn: DbConn) -> EmptyResult {
|
|||||||
}
|
}
|
||||||
|
|
||||||
user.last_verifying_at = Some(user.created_at);
|
user.last_verifying_at = Some(user.created_at);
|
||||||
} else {
|
} else if let Err(e) = mail::send_welcome(&user.email) {
|
||||||
if let Err(e) = mail::send_welcome(&user.email) {
|
|
||||||
error!("Error sending welcome email: {:#?}", e);
|
error!("Error sending welcome email: {:#?}", e);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
|
||||||
|
|
||||||
user.save(&conn)
|
user.save(&conn)
|
||||||
}
|
}
|
||||||
|
|
||||||
#[get("/accounts/profile")]
|
#[get("/accounts/profile")]
|
||||||
fn profile(headers: Headers, conn: DbConn) -> JsonResult {
|
fn profile(headers: Headers, conn: DbConn) -> Json<Value> {
|
||||||
Ok(Json(headers.user.to_json(&conn)))
|
Json(headers.user.to_json(&conn))
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Deserialize, Debug)]
|
#[derive(Deserialize, Debug)]
|
||||||
@@ -207,7 +206,12 @@ fn post_keys(data: JsonUpcase<KeysData>, headers: Headers, conn: DbConn) -> Json
|
|||||||
user.public_key = Some(data.PublicKey);
|
user.public_key = Some(data.PublicKey);
|
||||||
|
|
||||||
user.save(&conn)?;
|
user.save(&conn)?;
|
||||||
Ok(Json(user.to_json(&conn)))
|
|
||||||
|
Ok(Json(json!({
|
||||||
|
"PrivateKey": user.private_key,
|
||||||
|
"PublicKey": user.public_key,
|
||||||
|
"Object":"keys"
|
||||||
|
})))
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Deserialize)]
|
#[derive(Deserialize)]
|
||||||
@@ -227,7 +231,7 @@ fn post_password(data: JsonUpcase<ChangePassData>, headers: Headers, conn: DbCon
|
|||||||
err!("Invalid password")
|
err!("Invalid password")
|
||||||
}
|
}
|
||||||
|
|
||||||
user.set_password(&data.NewMasterPasswordHash);
|
user.set_password(&data.NewMasterPasswordHash, Some("post_rotatekey"));
|
||||||
user.akey = data.Key;
|
user.akey = data.Key;
|
||||||
user.save(&conn)
|
user.save(&conn)
|
||||||
}
|
}
|
||||||
@@ -254,7 +258,7 @@ fn post_kdf(data: JsonUpcase<ChangeKdfData>, headers: Headers, conn: DbConn) ->
|
|||||||
|
|
||||||
user.client_kdf_iter = data.KdfIterations;
|
user.client_kdf_iter = data.KdfIterations;
|
||||||
user.client_kdf_type = data.Kdf;
|
user.client_kdf_type = data.Kdf;
|
||||||
user.set_password(&data.NewMasterPasswordHash);
|
user.set_password(&data.NewMasterPasswordHash, None);
|
||||||
user.akey = data.Key;
|
user.akey = data.Key;
|
||||||
user.save(&conn)
|
user.save(&conn)
|
||||||
}
|
}
|
||||||
@@ -333,6 +337,7 @@ fn post_rotatekey(data: JsonUpcase<KeyData>, headers: Headers, conn: DbConn, nt:
|
|||||||
user.akey = data.Key;
|
user.akey = data.Key;
|
||||||
user.private_key = Some(data.PrivateKey);
|
user.private_key = Some(data.PrivateKey);
|
||||||
user.reset_security_stamp();
|
user.reset_security_stamp();
|
||||||
|
user.reset_stamp_exception();
|
||||||
|
|
||||||
user.save(&conn)
|
user.save(&conn)
|
||||||
}
|
}
|
||||||
@@ -371,8 +376,8 @@ fn post_email_token(data: JsonUpcase<EmailTokenData>, headers: Headers, conn: Db
|
|||||||
err!("Email already in use");
|
err!("Email already in use");
|
||||||
}
|
}
|
||||||
|
|
||||||
if !CONFIG.signups_allowed() && !CONFIG.can_signup_user(&data.NewEmail) {
|
if !CONFIG.is_email_domain_allowed(&data.NewEmail) {
|
||||||
err!("Email cannot be changed to this address");
|
err!("Email domain not allowed");
|
||||||
}
|
}
|
||||||
|
|
||||||
let token = crypto::generate_token(6)?;
|
let token = crypto::generate_token(6)?;
|
||||||
@@ -414,20 +419,21 @@ fn post_email(data: JsonUpcase<ChangeEmailData>, headers: Headers, conn: DbConn)
|
|||||||
|
|
||||||
match user.email_new {
|
match user.email_new {
|
||||||
Some(ref val) => {
|
Some(ref val) => {
|
||||||
if *val != data.NewEmail.to_string() {
|
if val != &data.NewEmail {
|
||||||
err!("Email change mismatch");
|
err!("Email change mismatch");
|
||||||
}
|
}
|
||||||
},
|
}
|
||||||
None => err!("No email change pending"),
|
None => err!("No email change pending"),
|
||||||
}
|
}
|
||||||
|
|
||||||
if CONFIG.mail_enabled() {
|
if CONFIG.mail_enabled() {
|
||||||
// Only check the token if we sent out an email...
|
// Only check the token if we sent out an email...
|
||||||
match user.email_new_token {
|
match user.email_new_token {
|
||||||
Some(ref val) =>
|
Some(ref val) => {
|
||||||
if *val != data.Token.into_string() {
|
if *val != data.Token.into_string() {
|
||||||
err!("Token mismatch");
|
err!("Token mismatch");
|
||||||
}
|
}
|
||||||
|
}
|
||||||
None => err!("No email change pending"),
|
None => err!("No email change pending"),
|
||||||
}
|
}
|
||||||
user.verified_at = Some(Utc::now().naive_utc());
|
user.verified_at = Some(Utc::now().naive_utc());
|
||||||
@@ -439,7 +445,7 @@ fn post_email(data: JsonUpcase<ChangeEmailData>, headers: Headers, conn: DbConn)
|
|||||||
user.email_new = None;
|
user.email_new = None;
|
||||||
user.email_new_token = None;
|
user.email_new_token = None;
|
||||||
|
|
||||||
user.set_password(&data.NewMasterPasswordHash);
|
user.set_password(&data.NewMasterPasswordHash, None);
|
||||||
user.akey = data.Key;
|
user.akey = data.Key;
|
||||||
|
|
||||||
user.save(&conn)
|
user.save(&conn)
|
||||||
@@ -454,7 +460,7 @@ fn post_verify_email(headers: Headers, _conn: DbConn) -> EmptyResult {
|
|||||||
}
|
}
|
||||||
|
|
||||||
if let Err(e) = mail::send_verify_email(&user.email, &user.uuid) {
|
if let Err(e) = mail::send_verify_email(&user.email, &user.uuid) {
|
||||||
error!("Error sending delete account email: {:#?}", e);
|
error!("Error sending verify_email email: {:#?}", e);
|
||||||
}
|
}
|
||||||
|
|
||||||
Ok(())
|
Ok(())
|
||||||
@@ -480,11 +486,9 @@ fn post_verify_email_token(data: JsonUpcase<VerifyEmailTokenData>, conn: DbConn)
|
|||||||
Ok(claims) => claims,
|
Ok(claims) => claims,
|
||||||
Err(_) => err!("Invalid claim"),
|
Err(_) => err!("Invalid claim"),
|
||||||
};
|
};
|
||||||
|
|
||||||
if claims.sub != user.uuid {
|
if claims.sub != user.uuid {
|
||||||
err!("Invalid claim");
|
err!("Invalid claim");
|
||||||
}
|
}
|
||||||
|
|
||||||
user.verified_at = Some(Utc::now().naive_utc());
|
user.verified_at = Some(Utc::now().naive_utc());
|
||||||
user.last_verifying_at = None;
|
user.last_verifying_at = None;
|
||||||
user.login_verify_count = 0;
|
user.login_verify_count = 0;
|
||||||
@@ -501,7 +505,7 @@ struct DeleteRecoverData {
|
|||||||
Email: String,
|
Email: String,
|
||||||
}
|
}
|
||||||
|
|
||||||
#[post("/accounts/delete-recover", data="<data>")]
|
#[post("/accounts/delete-recover", data = "<data>")]
|
||||||
fn post_delete_recover(data: JsonUpcase<DeleteRecoverData>, conn: DbConn) -> EmptyResult {
|
fn post_delete_recover(data: JsonUpcase<DeleteRecoverData>, conn: DbConn) -> EmptyResult {
|
||||||
let data: DeleteRecoverData = data.into_inner().data;
|
let data: DeleteRecoverData = data.into_inner().data;
|
||||||
|
|
||||||
@@ -530,7 +534,7 @@ struct DeleteRecoverTokenData {
|
|||||||
Token: String,
|
Token: String,
|
||||||
}
|
}
|
||||||
|
|
||||||
#[post("/accounts/delete-recover-token", data="<data>")]
|
#[post("/accounts/delete-recover-token", data = "<data>")]
|
||||||
fn post_delete_recover_token(data: JsonUpcase<DeleteRecoverTokenData>, conn: DbConn) -> EmptyResult {
|
fn post_delete_recover_token(data: JsonUpcase<DeleteRecoverTokenData>, conn: DbConn) -> EmptyResult {
|
||||||
let data: DeleteRecoverTokenData = data.into_inner().data;
|
let data: DeleteRecoverTokenData = data.into_inner().data;
|
||||||
|
|
||||||
@@ -543,11 +547,9 @@ fn post_delete_recover_token(data: JsonUpcase<DeleteRecoverTokenData>, conn: DbC
|
|||||||
Ok(claims) => claims,
|
Ok(claims) => claims,
|
||||||
Err(_) => err!("Invalid claim"),
|
Err(_) => err!("Invalid claim"),
|
||||||
};
|
};
|
||||||
|
|
||||||
if claims.sub != user.uuid {
|
if claims.sub != user.uuid {
|
||||||
err!("Invalid claim");
|
err!("Invalid claim");
|
||||||
}
|
}
|
||||||
|
|
||||||
user.delete(&conn)
|
user.delete(&conn)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -609,7 +611,7 @@ struct PreloginData {
|
|||||||
}
|
}
|
||||||
|
|
||||||
#[post("/accounts/prelogin", data = "<data>")]
|
#[post("/accounts/prelogin", data = "<data>")]
|
||||||
fn prelogin(data: JsonUpcase<PreloginData>, conn: DbConn) -> JsonResult {
|
fn prelogin(data: JsonUpcase<PreloginData>, conn: DbConn) -> Json<Value> {
|
||||||
let data: PreloginData = data.into_inner().data;
|
let data: PreloginData = data.into_inner().data;
|
||||||
|
|
||||||
let (kdf_type, kdf_iter) = match User::find_by_mail(&data.Email, &conn) {
|
let (kdf_type, kdf_iter) = match User::find_by_mail(&data.Email, &conn) {
|
||||||
@@ -617,8 +619,25 @@ fn prelogin(data: JsonUpcase<PreloginData>, conn: DbConn) -> JsonResult {
|
|||||||
None => (User::CLIENT_KDF_TYPE_DEFAULT, User::CLIENT_KDF_ITER_DEFAULT),
|
None => (User::CLIENT_KDF_TYPE_DEFAULT, User::CLIENT_KDF_ITER_DEFAULT),
|
||||||
};
|
};
|
||||||
|
|
||||||
Ok(Json(json!({
|
Json(json!({
|
||||||
"Kdf": kdf_type,
|
"Kdf": kdf_type,
|
||||||
"KdfIterations": kdf_iter
|
"KdfIterations": kdf_iter
|
||||||
})))
|
}))
|
||||||
|
}
|
||||||
|
#[derive(Deserialize)]
|
||||||
|
#[allow(non_snake_case)]
|
||||||
|
struct VerifyPasswordData {
|
||||||
|
MasterPasswordHash: String,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[post("/accounts/verify-password", data = "<data>")]
|
||||||
|
fn verify_password(data: JsonUpcase<VerifyPasswordData>, headers: Headers, _conn: DbConn) -> EmptyResult {
|
||||||
|
let data: VerifyPasswordData = data.into_inner().data;
|
||||||
|
let user = headers.user;
|
||||||
|
|
||||||
|
if !user.check_valid_password(&data.MasterPasswordHash) {
|
||||||
|
err!("Invalid password")
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(())
|
||||||
}
|
}
|
||||||
|
@@ -1,28 +1,33 @@
|
|||||||
use std::collections::{HashMap, HashSet};
|
use std::collections::{HashMap, HashSet};
|
||||||
use std::path::Path;
|
use std::path::Path;
|
||||||
|
|
||||||
use rocket::http::ContentType;
|
use chrono::{NaiveDateTime, Utc};
|
||||||
use rocket::{request::Form, Data, Route};
|
use rocket::{http::ContentType, request::Form, Data, Route};
|
||||||
|
|
||||||
use rocket_contrib::json::Json;
|
use rocket_contrib::json::Json;
|
||||||
use serde_json::Value;
|
use serde_json::Value;
|
||||||
|
|
||||||
use multipart::server::save::SavedData;
|
|
||||||
use multipart::server::{Multipart, SaveResult};
|
|
||||||
|
|
||||||
use data_encoding::HEXLOWER;
|
use data_encoding::HEXLOWER;
|
||||||
|
use multipart::server::{save::SavedData, Multipart, SaveResult};
|
||||||
|
|
||||||
use crate::db::models::*;
|
use crate::{
|
||||||
use crate::db::DbConn;
|
api::{self, EmptyResult, JsonResult, JsonUpcase, Notify, PasswordData, UpdateType},
|
||||||
|
auth::Headers,
|
||||||
use crate::crypto;
|
crypto,
|
||||||
|
db::{models::*, DbConn},
|
||||||
use crate::api::{self, EmptyResult, JsonResult, JsonUpcase, Notify, PasswordData, UpdateType};
|
CONFIG,
|
||||||
use crate::auth::Headers;
|
};
|
||||||
|
|
||||||
use crate::CONFIG;
|
|
||||||
|
|
||||||
pub fn routes() -> Vec<Route> {
|
pub fn routes() -> Vec<Route> {
|
||||||
|
// Note that many routes have an `admin` variant; this seems to be
|
||||||
|
// because the stored procedure that upstream Bitwarden uses to determine
|
||||||
|
// whether the user can edit a cipher doesn't take into account whether
|
||||||
|
// the user is an org owner/admin. The `admin` variant first checks
|
||||||
|
// whether the user is an owner/admin of the relevant org, and if so,
|
||||||
|
// allows the operation unconditionally.
|
||||||
|
//
|
||||||
|
// bitwarden_rs factors in the org owner/admin status as part of
|
||||||
|
// determining the write accessibility of a cipher, so most
|
||||||
|
// admin/non-admin implementations can be shared.
|
||||||
routes![
|
routes![
|
||||||
sync,
|
sync,
|
||||||
get_ciphers,
|
get_ciphers,
|
||||||
@@ -44,15 +49,24 @@ pub fn routes() -> Vec<Route> {
|
|||||||
post_cipher_admin,
|
post_cipher_admin,
|
||||||
post_cipher_share,
|
post_cipher_share,
|
||||||
put_cipher_share,
|
put_cipher_share,
|
||||||
put_cipher_share_seleted,
|
put_cipher_share_selected,
|
||||||
post_cipher,
|
post_cipher,
|
||||||
put_cipher,
|
put_cipher,
|
||||||
delete_cipher_post,
|
delete_cipher_post,
|
||||||
delete_cipher_post_admin,
|
delete_cipher_post_admin,
|
||||||
|
delete_cipher_put,
|
||||||
|
delete_cipher_put_admin,
|
||||||
delete_cipher,
|
delete_cipher,
|
||||||
delete_cipher_admin,
|
delete_cipher_admin,
|
||||||
delete_cipher_selected,
|
delete_cipher_selected,
|
||||||
delete_cipher_selected_post,
|
delete_cipher_selected_post,
|
||||||
|
delete_cipher_selected_put,
|
||||||
|
delete_cipher_selected_admin,
|
||||||
|
delete_cipher_selected_post_admin,
|
||||||
|
delete_cipher_selected_put_admin,
|
||||||
|
restore_cipher_put,
|
||||||
|
restore_cipher_put_admin,
|
||||||
|
restore_cipher_selected,
|
||||||
delete_all,
|
delete_all,
|
||||||
move_cipher_selected,
|
move_cipher_selected,
|
||||||
move_cipher_selected_put,
|
move_cipher_selected_put,
|
||||||
@@ -70,51 +84,64 @@ struct SyncData {
|
|||||||
}
|
}
|
||||||
|
|
||||||
#[get("/sync?<data..>")]
|
#[get("/sync?<data..>")]
|
||||||
fn sync(data: Form<SyncData>, headers: Headers, conn: DbConn) -> JsonResult {
|
fn sync(data: Form<SyncData>, headers: Headers, conn: DbConn) -> Json<Value> {
|
||||||
let user_json = headers.user.to_json(&conn);
|
let user_json = headers.user.to_json(&conn);
|
||||||
|
|
||||||
let folders = Folder::find_by_user(&headers.user.uuid, &conn);
|
let folders = Folder::find_by_user(&headers.user.uuid, &conn);
|
||||||
let folders_json: Vec<Value> = folders.iter().map(Folder::to_json).collect();
|
let folders_json: Vec<Value> = folders.iter().map(Folder::to_json).collect();
|
||||||
|
|
||||||
let collections = Collection::find_by_user_uuid(&headers.user.uuid, &conn);
|
let collections = Collection::find_by_user_uuid(&headers.user.uuid, &conn);
|
||||||
let collections_json: Vec<Value> = collections.iter().map(Collection::to_json).collect();
|
let collections_json: Vec<Value> = collections.iter()
|
||||||
|
.map(|c| c.to_json_details(&headers.user.uuid, &conn))
|
||||||
|
.collect();
|
||||||
|
|
||||||
let ciphers = Cipher::find_by_user(&headers.user.uuid, &conn);
|
let policies = OrgPolicy::find_by_user(&headers.user.uuid, &conn);
|
||||||
|
let policies_json: Vec<Value> = policies.iter().map(OrgPolicy::to_json).collect();
|
||||||
|
|
||||||
|
let ciphers = Cipher::find_by_user_visible(&headers.user.uuid, &conn);
|
||||||
let ciphers_json: Vec<Value> = ciphers
|
let ciphers_json: Vec<Value> = ciphers
|
||||||
.iter()
|
.iter()
|
||||||
.map(|c| c.to_json(&headers.host, &headers.user.uuid, &conn))
|
.map(|c| c.to_json(&headers.host, &headers.user.uuid, &conn))
|
||||||
.collect();
|
.collect();
|
||||||
|
|
||||||
|
let sends = Send::find_by_user(&headers.user.uuid, &conn);
|
||||||
|
let sends_json: Vec<Value> = sends
|
||||||
|
.iter()
|
||||||
|
.map(|s| s.to_json())
|
||||||
|
.collect();
|
||||||
|
|
||||||
let domains_json = if data.exclude_domains {
|
let domains_json = if data.exclude_domains {
|
||||||
Value::Null
|
Value::Null
|
||||||
} else {
|
} else {
|
||||||
api::core::_get_eq_domains(headers, true).unwrap().into_inner()
|
api::core::_get_eq_domains(headers, true).into_inner()
|
||||||
};
|
};
|
||||||
|
|
||||||
Ok(Json(json!({
|
Json(json!({
|
||||||
"Profile": user_json,
|
"Profile": user_json,
|
||||||
"Folders": folders_json,
|
"Folders": folders_json,
|
||||||
"Collections": collections_json,
|
"Collections": collections_json,
|
||||||
|
"Policies": policies_json,
|
||||||
"Ciphers": ciphers_json,
|
"Ciphers": ciphers_json,
|
||||||
"Domains": domains_json,
|
"Domains": domains_json,
|
||||||
|
"Sends": sends_json,
|
||||||
"Object": "sync"
|
"Object": "sync"
|
||||||
})))
|
}))
|
||||||
}
|
}
|
||||||
|
|
||||||
#[get("/ciphers")]
|
#[get("/ciphers")]
|
||||||
fn get_ciphers(headers: Headers, conn: DbConn) -> JsonResult {
|
fn get_ciphers(headers: Headers, conn: DbConn) -> Json<Value> {
|
||||||
let ciphers = Cipher::find_by_user(&headers.user.uuid, &conn);
|
let ciphers = Cipher::find_by_user_visible(&headers.user.uuid, &conn);
|
||||||
|
|
||||||
let ciphers_json: Vec<Value> = ciphers
|
let ciphers_json: Vec<Value> = ciphers
|
||||||
.iter()
|
.iter()
|
||||||
.map(|c| c.to_json(&headers.host, &headers.user.uuid, &conn))
|
.map(|c| c.to_json(&headers.host, &headers.user.uuid, &conn))
|
||||||
.collect();
|
.collect();
|
||||||
|
|
||||||
Ok(Json(json!({
|
Json(json!({
|
||||||
"Data": ciphers_json,
|
"Data": ciphers_json,
|
||||||
"Object": "list",
|
"Object": "list",
|
||||||
"ContinuationToken": null
|
"ContinuationToken": null
|
||||||
})))
|
}))
|
||||||
}
|
}
|
||||||
|
|
||||||
#[get("/ciphers/<uuid>")]
|
#[get("/ciphers/<uuid>")]
|
||||||
@@ -177,6 +204,14 @@ pub struct CipherData {
|
|||||||
#[serde(rename = "Attachments")]
|
#[serde(rename = "Attachments")]
|
||||||
_Attachments: Option<Value>, // Unused, contains map of {id: filename}
|
_Attachments: Option<Value>, // Unused, contains map of {id: filename}
|
||||||
Attachments2: Option<HashMap<String, Attachments2Data>>,
|
Attachments2: Option<HashMap<String, Attachments2Data>>,
|
||||||
|
|
||||||
|
// The revision datetime (in ISO 8601 format) of the client's local copy
|
||||||
|
// of the cipher. This is used to prevent a client from updating a cipher
|
||||||
|
// when it doesn't have the latest version, as that can result in data
|
||||||
|
// loss. It's not an error when no value is provided; this can happen
|
||||||
|
// when using older client versions, or if the operation doesn't involve
|
||||||
|
// updating an existing cipher.
|
||||||
|
LastKnownRevisionDate: Option<String>,
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Deserialize, Debug)]
|
#[derive(Deserialize, Debug)]
|
||||||
@@ -186,22 +221,46 @@ pub struct Attachments2Data {
|
|||||||
Key: String,
|
Key: String,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Called when an org admin clones an org cipher.
|
||||||
#[post("/ciphers/admin", data = "<data>")]
|
#[post("/ciphers/admin", data = "<data>")]
|
||||||
 fn post_ciphers_admin(data: JsonUpcase<ShareCipherData>, headers: Headers, conn: DbConn, nt: Notify) -> JsonResult {
-    let data: ShareCipherData = data.into_inner().data;
+    post_ciphers_create(data, headers, conn, nt)
+}
 
+/// Called when creating a new org-owned cipher, or cloning a cipher (whether
+/// user- or org-owned). When cloning a cipher to a user-owned cipher,
+/// `organizationId` is null.
+#[post("/ciphers/create", data = "<data>")]
+fn post_ciphers_create(data: JsonUpcase<ShareCipherData>, headers: Headers, conn: DbConn, nt: Notify) -> JsonResult {
+    let mut data: ShareCipherData = data.into_inner().data;
+
+    // Check if there are one more more collections selected when this cipher is part of an organization.
+    // err if this is not the case before creating an empty cipher.
+    if data.Cipher.OrganizationId.is_some() && data.CollectionIds.is_empty() {
+        err!("You must select at least one collection.");
+    }
+
+    // This check is usually only needed in update_cipher_from_data(), but we
+    // need it here as well to avoid creating an empty cipher in the call to
+    // cipher.save() below.
+    enforce_personal_ownership_policy(&data.Cipher, &headers, &conn)?;
+
     let mut cipher = Cipher::new(data.Cipher.Type, data.Cipher.Name.clone());
     cipher.user_uuid = Some(headers.user.uuid.clone());
     cipher.save(&conn)?;
 
+    // When cloning a cipher, the Bitwarden clients seem to set this field
+    // based on the cipher being cloned (when creating a new cipher, it's set
+    // to null as expected). However, `cipher.created_at` is initialized to
+    // the current time, so the stale data check will end up failing down the
+    // line. Since this function only creates new ciphers (whether by cloning
+    // or otherwise), we can just ignore this field entirely.
+    data.Cipher.LastKnownRevisionDate = None;
+
     share_cipher_by_uuid(&cipher.uuid, data, &headers, &conn, &nt)
 }
 
-#[post("/ciphers/create", data = "<data>")]
-fn post_ciphers_create(data: JsonUpcase<ShareCipherData>, headers: Headers, conn: DbConn, nt: Notify) -> JsonResult {
-    post_ciphers_admin(data, headers, conn, nt)
-}
-
+/// Called when creating a new user-owned cipher.
 #[post("/ciphers", data = "<data>")]
 fn post_ciphers(data: JsonUpcase<CipherData>, headers: Headers, conn: DbConn, nt: Notify) -> JsonResult {
     let data: CipherData = data.into_inner().data;
@@ -212,6 +271,29 @@ fn post_ciphers(data: JsonUpcase<CipherData>, headers: Headers, conn: DbConn, nt
     Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, &conn)))
 }
 
+/// Enforces the personal ownership policy on user-owned ciphers, if applicable.
+/// A non-owner/admin user belonging to an org with the personal ownership policy
+/// enabled isn't allowed to create new user-owned ciphers or modify existing ones
+/// (that were created before the policy was applicable to the user). The user is
+/// allowed to delete or share such ciphers to an org, however.
+///
+/// Ref: https://bitwarden.com/help/article/policies/#personal-ownership
+fn enforce_personal_ownership_policy(
+    data: &CipherData,
+    headers: &Headers,
+    conn: &DbConn
+) -> EmptyResult {
+    if data.OrganizationId.is_none() {
+        let user_uuid = &headers.user.uuid;
+        let policy_type = OrgPolicyType::PersonalOwnership;
+        if OrgPolicy::is_applicable_to_user(user_uuid, policy_type, conn) {
+            err!("Due to an Enterprise Policy, you are restricted from \
+                  saving items to your personal vault.")
+        }
+    }
+    Ok(())
+}
+
 pub fn update_cipher_from_data(
     cipher: &mut Cipher,
     data: CipherData,
@@ -221,6 +303,19 @@ pub fn update_cipher_from_data(
     nt: &Notify,
     ut: UpdateType,
 ) -> EmptyResult {
+    enforce_personal_ownership_policy(&data, headers, conn)?;
+
+    // Check that the client isn't updating an existing cipher with stale data.
+    if let Some(dt) = data.LastKnownRevisionDate {
+        match NaiveDateTime::parse_from_str(&dt, "%+") { // ISO 8601 format
+            Err(err) =>
+                warn!("Error parsing LastKnownRevisionDate '{}': {}", dt, err),
+            Ok(dt) if cipher.updated_at.signed_duration_since(dt).num_seconds() > 1 =>
+                err!("The client copy of this cipher is out of date. Resync the client and try again."),
+            Ok(_) => (),
+        }
+    }
+
     if cipher.organization_uuid.is_some() && cipher.organization_uuid != data.OrganizationId {
         err!("Organization mismatch. Please resync the client before updating the cipher")
     }
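
Note: the stale-data guard added above parses the client-supplied `LastKnownRevisionDate` as ISO 8601 and rejects the update when the stored copy is newer by more than one second. A minimal standalone sketch of that comparison, assuming the chrono crate (the helper name is illustrative, not part of the change):

    use chrono::NaiveDateTime;

    /// True when the client's copy is more than one second older than the
    /// stored cipher, i.e. the client should resync before updating.
    fn client_copy_is_stale(last_known: &str, updated_at: NaiveDateTime) -> bool {
        match NaiveDateTime::parse_from_str(last_known, "%+") { // ISO 8601
            // Unparsable dates are only warned about above, not rejected.
            Err(_) => false,
            Ok(dt) => updated_at.signed_duration_since(dt).num_seconds() > 1,
        }
    }
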
@@ -234,6 +329,11 @@ pub fn update_cipher_from_data(
             || cipher.is_write_accessible_to_user(&headers.user.uuid, &conn)
         {
             cipher.organization_uuid = Some(org_id);
+            // After some discussion in PR #1329 re-added the user_uuid = None again.
+            // TODO: Audit/Check the whole save/update cipher chain.
+            // Upstream uses the user_uuid to allow a cipher added by a user to an org to still allow the user to view/edit the cipher
+            // even when the user has hide-passwords configured as there policy.
+            // Removing the line below would fix that, but we have to check which effect this would have on the rest of the code.
             cipher.user_uuid = None;
         } else {
             err!("You don't have permission to add cipher directly to organization")
@@ -264,7 +364,10 @@ pub fn update_cipher_from_data(
             };
 
             if saved_att.cipher_uuid != cipher.uuid {
-                err!("Attachment is not owned by the cipher")
+                // Warn and break here since cloning ciphers provides attachment data but will not be cloned.
+                // If we error out here it will break the whole cloning and causes empty ciphers to appear.
+                warn!("Attachment is not owned by the cipher");
+                break;
             }
 
             saved_att.akey = Some(attachment.Key);
@@ -274,6 +377,23 @@ pub fn update_cipher_from_data(
         }
     }
 
+    // Cleanup cipher data, like removing the 'Response' key.
+    // This key is somewhere generated during Javascript so no way for us this fix this.
+    // Also, upstream only retrieves keys they actually want to store, and thus skip the 'Response' key.
+    // We do not mind which data is in it, the keep our model more flexible when there are upstream changes.
+    // But, we at least know we do not need to store and return this specific key.
+    fn _clean_cipher_data(mut json_data: Value) -> Value {
+        if json_data.is_array() {
+            json_data.as_array_mut()
+                .unwrap()
+                .iter_mut()
+                .for_each(|ref mut f| {
+                    f.as_object_mut().unwrap().remove("Response");
+                });
+        };
+        json_data
+    }
+
     let type_data_opt = match data.Type {
         1 => data.Login,
         2 => data.SecureNote,
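
Note: `_clean_cipher_data` above only strips the client-generated 'Response' key from each object in a JSON array. A self-contained sketch of the same idea with serde_json, using made-up sample data:

    use serde_json::{json, Value};

    fn strip_response_keys(mut uris: Value) -> Value {
        if let Some(arr) = uris.as_array_mut() {
            for uri in arr.iter_mut() {
                if let Some(obj) = uri.as_object_mut() {
                    obj.remove("Response"); // never stored, so drop it
                }
            }
        }
        uris
    }

    fn main() {
        let uris = json!([{ "Uri": "https://example.com", "Response": null }]);
        assert_eq!(strip_response_keys(uris), json!([{ "Uri": "https://example.com" }]));
    }
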
@@ -282,29 +402,28 @@ pub fn update_cipher_from_data(
         _ => err!("Invalid type"),
     };
 
-    let mut type_data = match type_data_opt {
-        Some(data) => data,
+    let type_data = match type_data_opt {
+        Some(mut data) => {
+            // Remove the 'Response' key from the base object.
+            data.as_object_mut().unwrap().remove("Response");
+            // Remove the 'Response' key from every Uri.
+            if data["Uris"].is_array() {
+                data["Uris"] = _clean_cipher_data(data["Uris"].clone());
+            }
+            data
+        },
         None => err!("Data missing"),
     };
 
-    // TODO: ******* Backwards compat start **********
-    // To remove backwards compatibility, just delete this code,
-    // and remove the compat code from cipher::to_json
-    type_data["Name"] = Value::String(data.Name.clone());
-    type_data["Notes"] = data.Notes.clone().map(Value::String).unwrap_or(Value::Null);
-    type_data["Fields"] = data.Fields.clone().unwrap_or(Value::Null);
-    type_data["PasswordHistory"] = data.PasswordHistory.clone().unwrap_or(Value::Null);
-    // TODO: ******* Backwards compat end **********
-
-    cipher.favorite = data.Favorite.unwrap_or(false);
     cipher.name = data.Name;
     cipher.notes = data.Notes;
-    cipher.fields = data.Fields.map(|f| f.to_string());
+    cipher.fields = data.Fields.map(|f| _clean_cipher_data(f).to_string() );
     cipher.data = type_data.to_string();
     cipher.password_history = data.PasswordHistory.map(|f| f.to_string());
 
     cipher.save(&conn)?;
     cipher.move_to_folder(data.FolderId, &headers.user.uuid, &conn)?;
+    cipher.set_favorite(data.Favorite, &headers.user.uuid, &conn)?;
 
     if ut != UpdateType::None {
         nt.send_cipher_update(ut, &cipher, &cipher.update_users_revision(&conn));
@@ -367,6 +486,7 @@ fn post_ciphers_import(data: JsonUpcase<ImportData>, headers: Headers, conn: DbC
     Ok(())
 }
 
+/// Called when an org admin modifies an existing org cipher.
 #[put("/ciphers/<uuid>/admin", data = "<data>")]
 fn put_cipher_admin(
     uuid: String,
@@ -403,6 +523,11 @@ fn put_cipher(uuid: String, data: JsonUpcase<CipherData>, headers: Headers, conn
         None => err!("Cipher doesn't exist"),
     };
 
+    // TODO: Check if only the folder ID or favorite status is being changed.
+    // These are per-user properties that technically aren't part of the
+    // cipher itself, so the user shouldn't need write access to change these.
+    // Interestingly, upstream Bitwarden doesn't properly handle this either.
+
     if !cipher.is_write_accessible_to_user(&headers.user.uuid, &conn) {
         err!("Cipher is not write accessible")
     }
@@ -536,7 +661,7 @@ struct ShareSelectedCipherData {
 }
 
 #[put("/ciphers/share", data = "<data>")]
-fn put_cipher_share_seleted(
+fn put_cipher_share_selected(
     data: JsonUpcase<ShareSelectedCipherData>,
     headers: Headers,
     conn: DbConn,
@@ -599,10 +724,13 @@ fn share_cipher_by_uuid(
         None => err!("Cipher doesn't exist"),
     };
 
-    match data.Cipher.OrganizationId.clone() {
-        None => err!("Organization id not provided"),
-        Some(organization_uuid) => {
     let mut shared_to_collection = false;
+
+    match data.Cipher.OrganizationId.clone() {
+        // If we don't get an organization ID, we don't do anything
+        // No error because this is used when using the Clone functionality
+        None => {}
+        Some(organization_uuid) => {
             for uuid in &data.CollectionIds {
                 match Collection::find_by_uuid_and_org(uuid, &organization_uuid, &conn) {
                     None => err!("Invalid collection ID provided"),
@@ -616,6 +744,9 @@ fn share_cipher_by_uuid(
                     }
                 }
             }
+        }
+    };
+
     update_cipher_from_data(
         &mut cipher,
         data.Cipher,
@@ -627,8 +758,6 @@ fn share_cipher_by_uuid(
     )?;
 
     Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, &conn)))
-        }
-    }
 }
 
 #[post("/ciphers/<uuid>/attachment", format = "multipart/form-data", data = "<data>")]
@@ -642,20 +771,49 @@ fn post_attachment(
 ) -> JsonResult {
     let cipher = match Cipher::find_by_uuid(&uuid, &conn) {
         Some(cipher) => cipher,
-        None => err!("Cipher doesn't exist"),
+        None => err_discard!("Cipher doesn't exist", data),
     };
 
     if !cipher.is_write_accessible_to_user(&headers.user.uuid, &conn) {
-        err!("Cipher is not write accessible")
+        err_discard!("Cipher is not write accessible", data)
     }
 
     let mut params = content_type.params();
     let boundary_pair = params.next().expect("No boundary provided");
     let boundary = boundary_pair.1;
+
+    let size_limit = if let Some(ref user_uuid) = cipher.user_uuid {
+        match CONFIG.user_attachment_limit() {
+            Some(0) => err_discard!("Attachments are disabled", data),
+            Some(limit_kb) => {
+                let left = (limit_kb * 1024) - Attachment::size_by_user(user_uuid, &conn);
+                if left <= 0 {
+                    err_discard!("Attachment size limit reached! Delete some files to open space", data)
+                }
+                Some(left as u64)
+            }
+            None => None,
+        }
+    } else if let Some(ref org_uuid) = cipher.organization_uuid {
+        match CONFIG.org_attachment_limit() {
+            Some(0) => err_discard!("Attachments are disabled", data),
+            Some(limit_kb) => {
+                let left = (limit_kb * 1024) - Attachment::size_by_org(org_uuid, &conn);
+                if left <= 0 {
+                    err_discard!("Attachment size limit reached! Delete some files to open space", data)
+                }
+                Some(left as u64)
+            }
+            None => None,
+        }
+    } else {
+        err_discard!("Cipher is neither owned by a user nor an organization", data);
+    };
 
     let base_path = Path::new(&CONFIG.attachments_folder()).join(&cipher.uuid);
 
     let mut attachment_key = None;
+    let mut error = None;
 
     Multipart::with_body(data.open(), boundary)
         .foreach_entry(|mut field| {
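
Note: the new `size_limit` above is the configured per-user or per-org quota (in KB) converted to bytes, minus what is already stored; a zero quota disables attachments, a non-positive remainder rejects the upload, and otherwise the remainder becomes the multipart size limit. A small arithmetic sketch under those assumptions (names are illustrative):

    /// limit_kb: Some(0) = disabled, Some(n) = quota in KB, None = unlimited.
    /// used_bytes: combined size of the attachments already stored.
    fn remaining_attachment_bytes(limit_kb: Option<i64>, used_bytes: i64) -> Result<Option<u64>, &'static str> {
        match limit_kb {
            Some(0) => Err("Attachments are disabled"),
            Some(kb) => {
                let left = kb * 1024 - used_bytes;
                if left <= 0 {
                    Err("Attachment size limit reached")
                } else {
                    Ok(Some(left as u64)) // passed to the multipart save as its size limit
                }
            }
            None => Ok(None),
        }
    }
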
@@ -674,18 +832,21 @@ fn post_attachment(
             let file_name = HEXLOWER.encode(&crypto::get_random(vec![0; 10]));
             let path = base_path.join(&file_name);
 
-            let size = match field.data.save().memory_threshold(0).size_limit(None).with_path(path) {
+            let size = match field.data.save().memory_threshold(0).size_limit(size_limit).with_path(path.clone()) {
                 SaveResult::Full(SavedData::File(_, size)) => size as i32,
                 SaveResult::Full(other) => {
-                    error!("Attachment is not a file: {:?}", other);
+                    std::fs::remove_file(path).ok();
+                    error = Some(format!("Attachment is not a file: {:?}", other));
                     return;
                 }
                 SaveResult::Partial(_, reason) => {
-                    error!("Partial result: {:?}", reason);
+                    std::fs::remove_file(path).ok();
+                    error = Some(format!("Attachment size limit exceeded with this file: {:?}", reason));
                     return;
                 }
                 SaveResult::Error(e) => {
-                    error!("Error: {:?}", e);
+                    std::fs::remove_file(path).ok();
+                    error = Some(format!("Error: {:?}", e));
                     return;
                 }
             };
@@ -699,6 +860,10 @@ fn post_attachment(
         })
         .expect("Error processing multipart data");
 
+    if let Some(ref e) = error {
+        err!(e);
+    }
+
     nt.send_cipher_update(UpdateType::CipherUpdate, &cipher, &cipher.update_users_revision(&conn));
 
     Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, &conn)))
@@ -716,11 +881,7 @@ fn post_attachment_admin(
     post_attachment(uuid, data, content_type, headers, conn, nt)
 }
 
-#[post(
-    "/ciphers/<uuid>/attachment/<attachment_id>/share",
-    format = "multipart/form-data",
-    data = "<data>"
-)]
+#[post("/ciphers/<uuid>/attachment/<attachment_id>/share", format = "multipart/form-data", data = "<data>")]
 fn post_attachment_share(
     uuid: String,
     attachment_id: String,
@@ -774,50 +935,79 @@ fn delete_attachment_admin(
 
 #[post("/ciphers/<uuid>/delete")]
 fn delete_cipher_post(uuid: String, headers: Headers, conn: DbConn, nt: Notify) -> EmptyResult {
-    _delete_cipher_by_uuid(&uuid, &headers, &conn, &nt)
+    _delete_cipher_by_uuid(&uuid, &headers, &conn, false, &nt)
 }
 
 #[post("/ciphers/<uuid>/delete-admin")]
 fn delete_cipher_post_admin(uuid: String, headers: Headers, conn: DbConn, nt: Notify) -> EmptyResult {
-    _delete_cipher_by_uuid(&uuid, &headers, &conn, &nt)
+    _delete_cipher_by_uuid(&uuid, &headers, &conn, false, &nt)
+}
+
+#[put("/ciphers/<uuid>/delete")]
+fn delete_cipher_put(uuid: String, headers: Headers, conn: DbConn, nt: Notify) -> EmptyResult {
+    _delete_cipher_by_uuid(&uuid, &headers, &conn, true, &nt)
+}
+
+#[put("/ciphers/<uuid>/delete-admin")]
+fn delete_cipher_put_admin(uuid: String, headers: Headers, conn: DbConn, nt: Notify) -> EmptyResult {
+    _delete_cipher_by_uuid(&uuid, &headers, &conn, true, &nt)
 }
 
 #[delete("/ciphers/<uuid>")]
 fn delete_cipher(uuid: String, headers: Headers, conn: DbConn, nt: Notify) -> EmptyResult {
-    _delete_cipher_by_uuid(&uuid, &headers, &conn, &nt)
+    _delete_cipher_by_uuid(&uuid, &headers, &conn, false, &nt)
 }
 
 #[delete("/ciphers/<uuid>/admin")]
 fn delete_cipher_admin(uuid: String, headers: Headers, conn: DbConn, nt: Notify) -> EmptyResult {
-    _delete_cipher_by_uuid(&uuid, &headers, &conn, &nt)
+    _delete_cipher_by_uuid(&uuid, &headers, &conn, false, &nt)
 }
 
 #[delete("/ciphers", data = "<data>")]
 fn delete_cipher_selected(data: JsonUpcase<Value>, headers: Headers, conn: DbConn, nt: Notify) -> EmptyResult {
-    let data: Value = data.into_inner().data;
-
-    let uuids = match data.get("Ids") {
-        Some(ids) => match ids.as_array() {
-            Some(ids) => ids.iter().filter_map(Value::as_str),
-            None => err!("Posted ids field is not an array"),
-        },
-        None => err!("Request missing ids field"),
-    };
-
-    for uuid in uuids {
-        if let error @ Err(_) = _delete_cipher_by_uuid(uuid, &headers, &conn, &nt) {
-            return error;
-        };
-    }
-
-    Ok(())
+    _delete_multiple_ciphers(data, headers, conn, false, nt)
 }
 
 #[post("/ciphers/delete", data = "<data>")]
 fn delete_cipher_selected_post(data: JsonUpcase<Value>, headers: Headers, conn: DbConn, nt: Notify) -> EmptyResult {
+    _delete_multiple_ciphers(data, headers, conn, false, nt)
+}
+
+#[put("/ciphers/delete", data = "<data>")]
+fn delete_cipher_selected_put(data: JsonUpcase<Value>, headers: Headers, conn: DbConn, nt: Notify) -> EmptyResult {
+    _delete_multiple_ciphers(data, headers, conn, true, nt) // soft delete
+}
+
+#[delete("/ciphers/admin", data = "<data>")]
+fn delete_cipher_selected_admin(data: JsonUpcase<Value>, headers: Headers, conn: DbConn, nt: Notify) -> EmptyResult {
     delete_cipher_selected(data, headers, conn, nt)
 }
 
+#[post("/ciphers/delete-admin", data = "<data>")]
+fn delete_cipher_selected_post_admin(data: JsonUpcase<Value>, headers: Headers, conn: DbConn, nt: Notify) -> EmptyResult {
+    delete_cipher_selected_post(data, headers, conn, nt)
+}
+
+#[put("/ciphers/delete-admin", data = "<data>")]
+fn delete_cipher_selected_put_admin(data: JsonUpcase<Value>, headers: Headers, conn: DbConn, nt: Notify) -> EmptyResult {
+    delete_cipher_selected_put(data, headers, conn, nt)
+}
+
+#[put("/ciphers/<uuid>/restore")]
+fn restore_cipher_put(uuid: String, headers: Headers, conn: DbConn, nt: Notify) -> JsonResult {
+    _restore_cipher_by_uuid(&uuid, &headers, &conn, &nt)
+}
+
+#[put("/ciphers/<uuid>/restore-admin")]
+fn restore_cipher_put_admin(uuid: String, headers: Headers, conn: DbConn, nt: Notify) -> JsonResult {
+    _restore_cipher_by_uuid(&uuid, &headers, &conn, &nt)
+}
+
+#[put("/ciphers/restore", data = "<data>")]
+fn restore_cipher_selected(data: JsonUpcase<Value>, headers: Headers, conn: DbConn, nt: Notify) -> JsonResult {
+    _restore_multiple_ciphers(data, &headers, &conn, &nt)
+}
+
 #[derive(Deserialize)]
 #[allow(non_snake_case)]
 struct MoveCipherData {
@@ -901,7 +1091,6 @@ fn delete_all(
         Some(user_org) => {
             if user_org.atype == UserOrgType::Owner {
                 Cipher::delete_all_by_organization(&org_data.org_id, &conn)?;
-                Collection::delete_all_by_organization(&org_data.org_id, &conn)?;
                 nt.send_user_update(UpdateType::Vault, &user);
                 Ok(())
             } else {
@@ -929,8 +1118,8 @@ fn delete_all(
     }
 }
 
-fn _delete_cipher_by_uuid(uuid: &str, headers: &Headers, conn: &DbConn, nt: &Notify) -> EmptyResult {
-    let cipher = match Cipher::find_by_uuid(&uuid, &conn) {
+fn _delete_cipher_by_uuid(uuid: &str, headers: &Headers, conn: &DbConn, soft_delete: bool, nt: &Notify) -> EmptyResult {
+    let mut cipher = match Cipher::find_by_uuid(&uuid, &conn) {
         Some(cipher) => cipher,
         None => err!("Cipher doesn't exist"),
     };
@@ -939,11 +1128,81 @@ fn _delete_cipher_by_uuid(uuid: &str, headers: &Headers, conn: &DbConn, nt: &Not
         err!("Cipher can't be deleted by user")
     }
 
+    if soft_delete {
+        cipher.deleted_at = Some(Utc::now().naive_utc());
+        cipher.save(&conn)?;
+        nt.send_cipher_update(UpdateType::CipherUpdate, &cipher, &cipher.update_users_revision(&conn));
+    } else {
         cipher.delete(&conn)?;
         nt.send_cipher_update(UpdateType::CipherDelete, &cipher, &cipher.update_users_revision(&conn));
+    }
 
     Ok(())
 }
 
+fn _delete_multiple_ciphers(data: JsonUpcase<Value>, headers: Headers, conn: DbConn, soft_delete: bool, nt: Notify) -> EmptyResult {
+    let data: Value = data.into_inner().data;
+
+    let uuids = match data.get("Ids") {
+        Some(ids) => match ids.as_array() {
+            Some(ids) => ids.iter().filter_map(Value::as_str),
+            None => err!("Posted ids field is not an array"),
+        },
+        None => err!("Request missing ids field"),
+    };
+
+    for uuid in uuids {
+        if let error @ Err(_) = _delete_cipher_by_uuid(uuid, &headers, &conn, soft_delete, &nt) {
+            return error;
+        };
+    }
+
+    Ok(())
+}
+
+fn _restore_cipher_by_uuid(uuid: &str, headers: &Headers, conn: &DbConn, nt: &Notify) -> JsonResult {
+    let mut cipher = match Cipher::find_by_uuid(&uuid, &conn) {
+        Some(cipher) => cipher,
+        None => err!("Cipher doesn't exist"),
+    };
+
+    if !cipher.is_write_accessible_to_user(&headers.user.uuid, &conn) {
+        err!("Cipher can't be restored by user")
+    }
+
+    cipher.deleted_at = None;
+    cipher.save(&conn)?;
+
+    nt.send_cipher_update(UpdateType::CipherUpdate, &cipher, &cipher.update_users_revision(&conn));
+    Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, &conn)))
+}
+
+fn _restore_multiple_ciphers(data: JsonUpcase<Value>, headers: &Headers, conn: &DbConn, nt: &Notify) -> JsonResult {
+    let data: Value = data.into_inner().data;
+
+    let uuids = match data.get("Ids") {
+        Some(ids) => match ids.as_array() {
+            Some(ids) => ids.iter().filter_map(Value::as_str),
+            None => err!("Posted ids field is not an array"),
+        },
+        None => err!("Request missing ids field"),
+    };
+
+    let mut ciphers: Vec<Value> = Vec::new();
+    for uuid in uuids {
+        match _restore_cipher_by_uuid(uuid, headers, conn, nt) {
+            Ok(json) => ciphers.push(json.into_inner()),
+            err => return err
+        }
+    }
+
+    Ok(Json(json!({
+        "Data": ciphers,
+        "Object": "list",
+        "ContinuationToken": null
+    })))
+}
+
 fn _delete_cipher_attachment_by_id(
     uuid: &str,
     attachment_id: &str,
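
Note: the soft-delete and restore endpoints added above all reduce to one state transition on the cipher's `deleted_at` column: a soft delete stamps it with the current time (and notifies clients with `CipherUpdate`), a restore clears it, and only a hard delete removes the row. A tiny sketch of that transition, assuming chrono and a simplified struct:

    use chrono::{NaiveDateTime, Utc};

    struct CipherState {
        deleted_at: Option<NaiveDateTime>,
    }

    impl CipherState {
        fn soft_delete(&mut self) {
            self.deleted_at = Some(Utc::now().naive_utc()); // moved to the trash, row kept
        }
        fn restore(&mut self) {
            self.deleted_at = None; // back out of the trash
        }
        fn is_in_trash(&self) -> bool {
            self.deleted_at.is_some()
        }
    }
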
@@ -1,15 +1,13 @@
 use rocket_contrib::json::Json;
 use serde_json::Value;
 
-use crate::db::models::*;
-use crate::db::DbConn;
+use crate::{
+    api::{EmptyResult, JsonResult, JsonUpcase, Notify, UpdateType},
+    auth::Headers,
+    db::{models::*, DbConn},
+};
 
-use crate::api::{EmptyResult, JsonResult, JsonUpcase, Notify, UpdateType};
-use crate::auth::Headers;
-
-use rocket::Route;
-
-pub fn routes() -> Vec<Route> {
+pub fn routes() -> Vec<rocket::Route> {
     routes![
         get_folders,
         get_folder,
@@ -22,16 +20,16 @@ pub fn routes() -> Vec<Route> {
 }
 
 #[get("/folders")]
-fn get_folders(headers: Headers, conn: DbConn) -> JsonResult {
+fn get_folders(headers: Headers, conn: DbConn) -> Json<Value> {
     let folders = Folder::find_by_user(&headers.user.uuid, &conn);
 
     let folders_json: Vec<Value> = folders.iter().map(Folder::to_json).collect();
 
-    Ok(Json(json!({
+    Json(json!({
         "Data": folders_json,
         "Object": "list",
         "ContinuationToken": null,
-    })))
+    }))
 }
 
 #[get("/folders/<uuid>")]
@@ -50,7 +48,6 @@ fn get_folder(uuid: String, headers: Headers, conn: DbConn) -> JsonResult {
 
 #[derive(Deserialize)]
 #[allow(non_snake_case)]
-
 pub struct FolderData {
     pub Name: String,
 }
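
Note: get_folders above (and several handlers further down) now return `Json<Value>` directly instead of `JsonResult`, since they have no failure path and the `Ok(...)` wrapper was noise. A hedged sketch of the pattern for a Rocket 0.4-style handler (route name is illustrative; assumes the usual `#[macro_use] extern crate rocket;`):

    use rocket_contrib::json::Json;
    use serde_json::{json, Value};

    // Infallible handler: no Result wrapper needed.
    #[get("/example-list")]
    fn example_list() -> Json<Value> {
        Json(json!({
            "Data": [],
            "Object": "list",
            "ContinuationToken": null,
        }))
    }
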
@@ -2,7 +2,10 @@ mod accounts;
 mod ciphers;
 mod folders;
 mod organizations;
-pub(crate) mod two_factor;
+pub mod two_factor;
+mod sends;
+
+pub use sends::start_send_deletion_scheduler;
 
 pub fn routes() -> Vec<Route> {
     let mut mod_routes = routes![
@@ -20,6 +23,7 @@ pub fn routes() -> Vec<Route> {
     routes.append(&mut folders::routes());
     routes.append(&mut organizations::routes());
     routes.append(&mut two_factor::routes());
+    routes.append(&mut sends::routes());
     routes.append(&mut mod_routes);
 
     routes
@@ -29,17 +33,19 @@ pub fn routes() -> Vec<Route> {
 // Move this somewhere else
 //
 use rocket::Route;
 
 use rocket_contrib::json::Json;
+use rocket::response::Response;
 use serde_json::Value;
 
-use crate::api::{EmptyResult, JsonResult, JsonUpcase};
-use crate::auth::Headers;
-use crate::db::DbConn;
-use crate::error::Error;
+use crate::{
+    api::{JsonResult, JsonUpcase},
+    auth::Headers,
+    db::DbConn,
+    error::Error,
+};
 
 #[put("/devices/identifier/<uuid>/clear-token")]
-fn clear_device_token(uuid: String) -> EmptyResult {
+fn clear_device_token<'a>(uuid: String) -> Response<'a> {
     // This endpoint doesn't have auth header
 
     let _ = uuid;
@@ -48,11 +54,11 @@ fn clear_device_token(uuid: String) -> EmptyResult {
     // This only clears push token
     // https://github.com/bitwarden/core/blob/master/src/Api/Controllers/DevicesController.cs#L109
     // https://github.com/bitwarden/core/blob/master/src/Core/Services/Implementations/DeviceService.cs#L37
-    Ok(())
+    Response::new()
 }
 
 #[put("/devices/identifier/<uuid>/token", data = "<data>")]
-fn put_device_token(uuid: String, data: JsonUpcase<Value>, headers: Headers) -> JsonResult {
+fn put_device_token(uuid: String, data: JsonUpcase<Value>, headers: Headers) -> Json<Value> {
     let _data: Value = data.into_inner().data;
     // Data has a single string value "PushToken"
     let _ = uuid;
@@ -60,13 +66,13 @@ fn put_device_token(uuid: String, data: JsonUpcase<Value>, headers: Headers) ->
 
     // TODO: This should save the push token, but we don't have push functionality
 
-    Ok(Json(json!({
+    Json(json!({
         "Id": headers.device.uuid,
         "Name": headers.device.name,
         "Type": headers.device.atype,
         "Identifier": headers.device.uuid,
         "CreationDate": crate::util::format_date(&headers.device.created_at),
-    })))
+    }))
 }
 
 #[derive(Serialize, Deserialize, Debug)]
@@ -80,11 +86,11 @@ struct GlobalDomain {
 const GLOBAL_DOMAINS: &str = include_str!("../../static/global_domains.json");
 
 #[get("/settings/domains")]
-fn get_eq_domains(headers: Headers) -> JsonResult {
+fn get_eq_domains(headers: Headers) -> Json<Value> {
     _get_eq_domains(headers, false)
 }
 
-fn _get_eq_domains(headers: Headers, no_excluded: bool) -> JsonResult {
+fn _get_eq_domains(headers: Headers, no_excluded: bool) -> Json<Value> {
     let user = headers.user;
     use serde_json::from_str;
 
@@ -101,11 +107,11 @@ fn _get_eq_domains(headers: Headers, no_excluded: bool) -> JsonResult {
         globals.retain(|g| !g.Excluded);
     }
 
-    Ok(Json(json!({
+    Json(json!({
         "EquivalentDomains": equivalent_domains,
         "GlobalEquivalentDomains": globals,
         "Object": "domains",
-    })))
+    }))
 }
 
 #[derive(Deserialize, Debug)]
@@ -146,14 +152,13 @@ fn hibp_breach(username: String) -> JsonResult {
         username
     );
 
-    use reqwest::{header::USER_AGENT, Client};
+    use reqwest::{blocking::Client, header::USER_AGENT};
 
     if let Some(api_key) = crate::CONFIG.hibp_api_key() {
-        let hibp_client = Client::builder()
-            .use_sys_proxy()
-            .build()?;
+        let hibp_client = Client::builder().build()?;
 
-        let res = hibp_client.get(&url)
+        let res = hibp_client
+            .get(&url)
             .header(USER_AGENT, user_agent)
             .header("hibp-api-key", api_key)
             .send()?;
@@ -172,8 +177,8 @@ fn hibp_breach(username: String) -> JsonResult {
             "Domain": "haveibeenpwned.com",
             "BreachDate": "2019-08-18T00:00:00Z",
             "AddedDate": "2019-08-18T00:00:00Z",
-            "Description": format!("Go to: <a href=\"https://haveibeenpwned.com/account/{account}\" target=\"_blank\" rel=\"noopener\">https://haveibeenpwned.com/account/{account}</a> for a manual check.<br/><br/>HaveIBeenPwned API key not set!<br/>Go to <a href=\"https://haveibeenpwned.com/API/Key\" target=\"_blank\" rel=\"noopener\">https://haveibeenpwned.com/API/Key</a> to purchase an API key from HaveIBeenPwned.<br/><br/>", account=username),
+            "Description": format!("Go to: <a href=\"https://haveibeenpwned.com/account/{account}\" target=\"_blank\" rel=\"noreferrer\">https://haveibeenpwned.com/account/{account}</a> for a manual check.<br/><br/>HaveIBeenPwned API key not set!<br/>Go to <a href=\"https://haveibeenpwned.com/API/Key\" target=\"_blank\" rel=\"noreferrer\">https://haveibeenpwned.com/API/Key</a> to purchase an API key from HaveIBeenPwned.<br/><br/>", account=username),
-            "LogoPath": "/bwrs_static/hibp.png",
+            "LogoPath": "bwrs_static/hibp.png",
             "PwnCount": 0,
             "DataClasses": [
                 "Error - No API key set!"
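
Note: the HIBP lookup above switches to the explicit `reqwest::blocking::Client` (reqwest 0.10+) and drops the old `use_sys_proxy()` builder call, which no longer exists there. A minimal hedged sketch of the new call shape, with a placeholder URL and user agent:

    use reqwest::{blocking::Client, header::USER_AGENT};

    fn fetch_breaches(url: &str, api_key: &str) -> Result<String, reqwest::Error> {
        let client = Client::builder().build()?;
        client
            .get(url)
            .header(USER_AGENT, "example-agent")
            .header("hibp-api-key", api_key)
            .send()?
            .text()
    }
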
|
@@ -1,16 +1,14 @@
|
|||||||
use rocket::request::Form;
|
use num_traits::FromPrimitive;
|
||||||
use rocket::Route;
|
use rocket::{request::Form, Route};
|
||||||
use rocket_contrib::json::Json;
|
use rocket_contrib::json::Json;
|
||||||
use serde_json::Value;
|
use serde_json::Value;
|
||||||
|
|
||||||
use crate::api::{
|
use crate::{
|
||||||
EmptyResult, JsonResult, JsonUpcase, JsonUpcaseVec, Notify, NumberOrString, PasswordData, UpdateType,
|
api::{EmptyResult, JsonResult, JsonUpcase, JsonUpcaseVec, Notify, NumberOrString, PasswordData, UpdateType},
|
||||||
|
auth::{decode_invite, AdminHeaders, Headers, OwnerHeaders, ManagerHeaders, ManagerHeadersLoose},
|
||||||
|
db::{models::*, DbConn},
|
||||||
|
mail, CONFIG,
|
||||||
};
|
};
|
||||||
use crate::auth::{decode_invite, AdminHeaders, Headers, OwnerHeaders};
|
|
||||||
use crate::db::models::*;
|
|
||||||
use crate::db::DbConn;
|
|
||||||
use crate::mail;
|
|
||||||
use crate::CONFIG;
|
|
||||||
|
|
||||||
pub fn routes() -> Vec<Route> {
|
pub fn routes() -> Vec<Route> {
|
||||||
routes![
|
routes![
|
||||||
@@ -45,6 +43,14 @@ pub fn routes() -> Vec<Route> {
|
|||||||
delete_user,
|
delete_user,
|
||||||
post_delete_user,
|
post_delete_user,
|
||||||
post_org_import,
|
post_org_import,
|
||||||
|
list_policies,
|
||||||
|
list_policies_token,
|
||||||
|
get_policy,
|
||||||
|
put_policy,
|
||||||
|
get_organization_tax,
|
||||||
|
get_plans,
|
||||||
|
get_plans_tax_rates,
|
||||||
|
import,
|
||||||
]
|
]
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -74,6 +80,10 @@ struct NewCollectionData {
|
|||||||
|
|
||||||
#[post("/organizations", data = "<data>")]
|
#[post("/organizations", data = "<data>")]
|
||||||
fn create_organization(headers: Headers, data: JsonUpcase<OrgData>, conn: DbConn) -> JsonResult {
|
fn create_organization(headers: Headers, data: JsonUpcase<OrgData>, conn: DbConn) -> JsonResult {
|
||||||
|
if !CONFIG.is_org_creation_allowed(&headers.user.email) {
|
||||||
|
err!("User not allowed to create organizations")
|
||||||
|
}
|
||||||
|
|
||||||
let data: OrgData = data.into_inner().data;
|
let data: OrgData = data.into_inner().data;
|
||||||
|
|
||||||
let org = Organization::new(data.Name, data.BillingEmail);
|
let org = Organization::new(data.Name, data.BillingEmail);
|
||||||
@@ -182,8 +192,8 @@ fn post_organization(
|
|||||||
|
|
||||||
// GET /api/collections?writeOnly=false
|
// GET /api/collections?writeOnly=false
|
||||||
#[get("/collections")]
|
#[get("/collections")]
|
||||||
fn get_user_collections(headers: Headers, conn: DbConn) -> JsonResult {
|
fn get_user_collections(headers: Headers, conn: DbConn) -> Json<Value> {
|
||||||
Ok(Json(json!({
|
Json(json!({
|
||||||
"Data":
|
"Data":
|
||||||
Collection::find_by_user_uuid(&headers.user.uuid, &conn)
|
Collection::find_by_user_uuid(&headers.user.uuid, &conn)
|
||||||
.iter()
|
.iter()
|
||||||
@@ -191,12 +201,12 @@ fn get_user_collections(headers: Headers, conn: DbConn) -> JsonResult {
|
|||||||
.collect::<Value>(),
|
.collect::<Value>(),
|
||||||
"Object": "list",
|
"Object": "list",
|
||||||
"ContinuationToken": null,
|
"ContinuationToken": null,
|
||||||
})))
|
}))
|
||||||
}
|
}
|
||||||
|
|
||||||
#[get("/organizations/<org_id>/collections")]
|
#[get("/organizations/<org_id>/collections")]
|
||||||
fn get_org_collections(org_id: String, _headers: AdminHeaders, conn: DbConn) -> JsonResult {
|
fn get_org_collections(org_id: String, _headers: AdminHeaders, conn: DbConn) -> Json<Value> {
|
||||||
Ok(Json(json!({
|
Json(json!({
|
||||||
"Data":
|
"Data":
|
||||||
Collection::find_by_organization(&org_id, &conn)
|
Collection::find_by_organization(&org_id, &conn)
|
||||||
.iter()
|
.iter()
|
||||||
@@ -204,13 +214,13 @@ fn get_org_collections(org_id: String, _headers: AdminHeaders, conn: DbConn) ->
|
|||||||
.collect::<Value>(),
|
.collect::<Value>(),
|
||||||
"Object": "list",
|
"Object": "list",
|
||||||
"ContinuationToken": null,
|
"ContinuationToken": null,
|
||||||
})))
|
}))
|
||||||
}
|
}
|
||||||
|
|
||||||
#[post("/organizations/<org_id>/collections", data = "<data>")]
|
#[post("/organizations/<org_id>/collections", data = "<data>")]
|
||||||
fn post_organization_collections(
|
fn post_organization_collections(
|
||||||
org_id: String,
|
org_id: String,
|
||||||
_headers: AdminHeaders,
|
headers: ManagerHeadersLoose,
|
||||||
data: JsonUpcase<NewCollectionData>,
|
data: JsonUpcase<NewCollectionData>,
|
||||||
conn: DbConn,
|
conn: DbConn,
|
||||||
) -> JsonResult {
|
) -> JsonResult {
|
||||||
@@ -221,9 +231,22 @@ fn post_organization_collections(
|
|||||||
None => err!("Can't find organization details"),
|
None => err!("Can't find organization details"),
|
||||||
};
|
};
|
||||||
|
|
||||||
|
// Get the user_organization record so that we can check if the user has access to all collections.
|
||||||
|
let user_org = match UserOrganization::find_by_user_and_org(&headers.user.uuid, &org_id, &conn) {
|
||||||
|
Some(u) => u,
|
||||||
|
None => err!("User is not part of organization"),
|
||||||
|
};
|
||||||
|
|
||||||
let collection = Collection::new(org.uuid, data.Name);
|
let collection = Collection::new(org.uuid, data.Name);
|
||||||
collection.save(&conn)?;
|
collection.save(&conn)?;
|
||||||
|
|
||||||
|
// If the user doesn't have access to all collections, only in case of a Manger,
|
||||||
|
// then we need to save the creating user uuid (Manager) to the users_collection table.
|
||||||
|
// Else the user will not have access to his own created collection.
|
||||||
|
if !user_org.access_all {
|
||||||
|
CollectionUser::save(&headers.user.uuid, &collection.uuid, false, false, &conn)?;
|
||||||
|
}
|
||||||
|
|
||||||
Ok(Json(collection.to_json()))
|
Ok(Json(collection.to_json()))
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -231,7 +254,7 @@ fn post_organization_collections(
|
|||||||
fn put_organization_collection_update(
|
fn put_organization_collection_update(
|
||||||
org_id: String,
|
org_id: String,
|
||||||
col_id: String,
|
col_id: String,
|
||||||
headers: AdminHeaders,
|
headers: ManagerHeaders,
|
||||||
data: JsonUpcase<NewCollectionData>,
|
data: JsonUpcase<NewCollectionData>,
|
||||||
conn: DbConn,
|
conn: DbConn,
|
||||||
) -> JsonResult {
|
) -> JsonResult {
|
||||||
@@ -242,7 +265,7 @@ fn put_organization_collection_update(
|
|||||||
fn post_organization_collection_update(
|
fn post_organization_collection_update(
|
||||||
org_id: String,
|
org_id: String,
|
||||||
col_id: String,
|
col_id: String,
|
||||||
_headers: AdminHeaders,
|
_headers: ManagerHeaders,
|
||||||
data: JsonUpcase<NewCollectionData>,
|
data: JsonUpcase<NewCollectionData>,
|
||||||
conn: DbConn,
|
conn: DbConn,
|
||||||
) -> JsonResult {
|
) -> JsonResult {
|
||||||
@@ -310,7 +333,7 @@ fn post_organization_collection_delete_user(
|
|||||||
}
|
}
|
||||||
|
|
||||||
#[delete("/organizations/<org_id>/collections/<col_id>")]
|
#[delete("/organizations/<org_id>/collections/<col_id>")]
|
||||||
fn delete_organization_collection(org_id: String, col_id: String, _headers: AdminHeaders, conn: DbConn) -> EmptyResult {
|
fn delete_organization_collection(org_id: String, col_id: String, _headers: ManagerHeaders, conn: DbConn) -> EmptyResult {
|
||||||
match Collection::find_by_uuid(&col_id, &conn) {
|
match Collection::find_by_uuid(&col_id, &conn) {
|
||||||
None => err!("Collection not found"),
|
None => err!("Collection not found"),
|
||||||
Some(collection) => {
|
Some(collection) => {
|
||||||
@@ -334,7 +357,7 @@ struct DeleteCollectionData {
|
|||||||
fn post_organization_collection_delete(
|
fn post_organization_collection_delete(
|
||||||
org_id: String,
|
org_id: String,
|
||||||
col_id: String,
|
col_id: String,
|
||||||
headers: AdminHeaders,
|
headers: ManagerHeaders,
|
||||||
_data: JsonUpcase<DeleteCollectionData>,
|
_data: JsonUpcase<DeleteCollectionData>,
|
||||||
conn: DbConn,
|
conn: DbConn,
|
||||||
) -> EmptyResult {
|
) -> EmptyResult {
|
||||||
@@ -342,7 +365,7 @@ fn post_organization_collection_delete(
|
|||||||
}
|
}
|
||||||
|
|
||||||
#[get("/organizations/<org_id>/collections/<coll_id>/details")]
|
#[get("/organizations/<org_id>/collections/<coll_id>/details")]
|
||||||
fn get_org_collection_detail(org_id: String, coll_id: String, headers: AdminHeaders, conn: DbConn) -> JsonResult {
|
fn get_org_collection_detail(org_id: String, coll_id: String, headers: ManagerHeaders, conn: DbConn) -> JsonResult {
|
||||||
match Collection::find_by_uuid_and_user(&coll_id, &headers.user.uuid, &conn) {
|
match Collection::find_by_uuid_and_user(&coll_id, &headers.user.uuid, &conn) {
|
||||||
None => err!("Collection not found"),
|
None => err!("Collection not found"),
|
||||||
Some(collection) => {
|
Some(collection) => {
|
||||||
@@ -356,7 +379,7 @@ fn get_org_collection_detail(org_id: String, coll_id: String, headers: AdminHead
|
|||||||
}
|
}
|
||||||
|
|
||||||
#[get("/organizations/<org_id>/collections/<coll_id>/users")]
|
#[get("/organizations/<org_id>/collections/<coll_id>/users")]
|
||||||
fn get_collection_users(org_id: String, coll_id: String, _headers: AdminHeaders, conn: DbConn) -> JsonResult {
|
fn get_collection_users(org_id: String, coll_id: String, _headers: ManagerHeaders, conn: DbConn) -> JsonResult {
|
||||||
// Get org and collection, check that collection is from org
|
// Get org and collection, check that collection is from org
|
||||||
let collection = match Collection::find_by_uuid_and_org(&coll_id, &org_id, &conn) {
|
let collection = match Collection::find_by_uuid_and_org(&coll_id, &org_id, &conn) {
|
||||||
None => err!("Collection not found in Organization"),
|
None => err!("Collection not found in Organization"),
|
||||||
@@ -369,7 +392,7 @@ fn get_collection_users(org_id: String, coll_id: String, _headers: AdminHeaders,
|
|||||||
.map(|col_user| {
|
.map(|col_user| {
|
||||||
UserOrganization::find_by_user_and_org(&col_user.user_uuid, &org_id, &conn)
|
UserOrganization::find_by_user_and_org(&col_user.user_uuid, &org_id, &conn)
|
||||||
.unwrap()
|
.unwrap()
|
||||||
.to_json_collection_user_details(col_user.read_only)
|
.to_json_user_access_restrictions(&col_user)
|
||||||
})
|
})
|
||||||
.collect();
|
.collect();
|
||||||
|
|
||||||
@@ -381,7 +404,7 @@ fn put_collection_users(
|
|||||||
org_id: String,
|
org_id: String,
|
||||||
coll_id: String,
|
coll_id: String,
|
||||||
data: JsonUpcaseVec<CollectionData>,
|
data: JsonUpcaseVec<CollectionData>,
|
||||||
_headers: AdminHeaders,
|
_headers: ManagerHeaders,
|
||||||
conn: DbConn,
|
conn: DbConn,
|
||||||
) -> EmptyResult {
|
) -> EmptyResult {
|
||||||
// Get org and collection, check that collection is from org
|
// Get org and collection, check that collection is from org
|
||||||
@@ -403,7 +426,9 @@ fn put_collection_users(
|
|||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
|
|
||||||
CollectionUser::save(&user.user_uuid, &coll_id, d.ReadOnly, &conn)?;
|
CollectionUser::save(&user.user_uuid, &coll_id,
|
||||||
|
d.ReadOnly, d.HidePasswords,
|
||||||
|
&conn)?;
|
||||||
}
|
}
|
||||||
|
|
||||||
Ok(())
|
Ok(())
|
||||||
@@ -416,30 +441,30 @@ struct OrgIdData {
|
|||||||
}
|
}
|
||||||
|
|
||||||
#[get("/ciphers/organization-details?<data..>")]
|
#[get("/ciphers/organization-details?<data..>")]
|
||||||
fn get_org_details(data: Form<OrgIdData>, headers: Headers, conn: DbConn) -> JsonResult {
|
fn get_org_details(data: Form<OrgIdData>, headers: Headers, conn: DbConn) -> Json<Value> {
|
||||||
let ciphers = Cipher::find_by_org(&data.organization_id, &conn);
|
let ciphers = Cipher::find_by_org(&data.organization_id, &conn);
|
||||||
let ciphers_json: Vec<Value> = ciphers
|
let ciphers_json: Vec<Value> = ciphers
|
||||||
.iter()
|
.iter()
|
||||||
.map(|c| c.to_json(&headers.host, &headers.user.uuid, &conn))
|
.map(|c| c.to_json(&headers.host, &headers.user.uuid, &conn))
|
||||||
.collect();
|
.collect();
|
||||||
|
|
||||||
Ok(Json(json!({
|
Json(json!({
|
||||||
"Data": ciphers_json,
|
"Data": ciphers_json,
|
||||||
"Object": "list",
|
"Object": "list",
|
||||||
"ContinuationToken": null,
|
"ContinuationToken": null,
|
||||||
})))
|
}))
|
||||||
}
|
}
|
||||||
|
|
||||||
#[get("/organizations/<org_id>/users")]
|
#[get("/organizations/<org_id>/users")]
|
||||||
fn get_org_users(org_id: String, _headers: AdminHeaders, conn: DbConn) -> JsonResult {
|
fn get_org_users(org_id: String, _headers: ManagerHeadersLoose, conn: DbConn) -> Json<Value> {
|
||||||
let users = UserOrganization::find_by_org(&org_id, &conn);
|
let users = UserOrganization::find_by_org(&org_id, &conn);
|
||||||
let users_json: Vec<Value> = users.iter().map(|c| c.to_json_user_details(&conn)).collect();
|
let users_json: Vec<Value> = users.iter().map(|c| c.to_json_user_details(&conn)).collect();
|
||||||
|
|
||||||
Ok(Json(json!({
|
Json(json!({
|
||||||
"Data": users_json,
|
"Data": users_json,
|
||||||
"Object": "list",
|
"Object": "list",
|
||||||
"ContinuationToken": null,
|
"ContinuationToken": null,
|
||||||
})))
|
}))
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Deserialize)]
|
#[derive(Deserialize)]
|
||||||
@@ -447,6 +472,7 @@ fn get_org_users(org_id: String, _headers: AdminHeaders, conn: DbConn) -> JsonRe
|
|||||||
struct CollectionData {
|
struct CollectionData {
|
||||||
Id: String,
|
Id: String,
|
||||||
ReadOnly: bool,
|
ReadOnly: bool,
|
||||||
|
HidePasswords: bool,
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Deserialize)]
|
#[derive(Deserialize)]
|
||||||
@@ -480,7 +506,11 @@ fn send_invite(org_id: String, data: JsonUpcase<InviteData>, headers: AdminHeade
|
|||||||
let user = match User::find_by_mail(&email, &conn) {
|
let user = match User::find_by_mail(&email, &conn) {
|
||||||
None => {
|
None => {
|
||||||
if !CONFIG.invitations_allowed() {
|
if !CONFIG.invitations_allowed() {
|
||||||
err!(format!("User email does not exist: {}", email))
|
err!(format!("User does not exist: {}", email))
|
||||||
|
}
|
||||||
|
|
||||||
|
if !CONFIG.is_email_domain_allowed(&email) {
|
||||||
|
err!("Email domain not eligible for invitations")
|
||||||
}
|
}
|
||||||
|
|
||||||
if !CONFIG.mail_enabled() {
|
if !CONFIG.mail_enabled() {
|
||||||
@@ -514,7 +544,9 @@ fn send_invite(org_id: String, data: JsonUpcase<InviteData>, headers: AdminHeade
|
|||||||
match Collection::find_by_uuid_and_org(&col.Id, &org_id, &conn) {
|
match Collection::find_by_uuid_and_org(&col.Id, &org_id, &conn) {
|
||||||
None => err!("Collection not found in Organization"),
|
None => err!("Collection not found in Organization"),
|
||||||
Some(collection) => {
|
Some(collection) => {
|
||||||
CollectionUser::save(&user.uuid, &collection.uuid, col.ReadOnly, &conn)?;
|
CollectionUser::save(&user.uuid, &collection.uuid,
|
||||||
|
col.ReadOnly, col.HidePasswords,
|
||||||
|
&conn)?;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -623,7 +655,7 @@ fn accept_invite(_org_id: String, _org_user_id: String, data: JsonUpcase<AcceptD
|
|||||||
}
|
}
|
||||||
|
|
||||||
if CONFIG.mail_enabled() {
|
if CONFIG.mail_enabled() {
|
||||||
let mut org_name = String::from("bitwarden_rs");
|
let mut org_name = CONFIG.invitation_org_name();
|
||||||
if let Some(org_id) = &claims.org_id {
|
if let Some(org_id) = &claims.org_id {
|
||||||
org_name = match Organization::find_by_uuid(&org_id, &conn) {
|
org_name = match Organization::find_by_uuid(&org_id, &conn) {
|
||||||
Some(org) => org.name,
|
Some(org) => org.name,
|
||||||
@@ -769,7 +801,9 @@ fn edit_user(
|
|||||||
match Collection::find_by_uuid_and_org(&col.Id, &org_id, &conn) {
|
match Collection::find_by_uuid_and_org(&col.Id, &org_id, &conn) {
|
||||||
None => err!("Collection not found in Organization"),
|
None => err!("Collection not found in Organization"),
|
||||||
Some(collection) => {
|
Some(collection) => {
|
||||||
CollectionUser::save(&user_to_edit.user_uuid, &collection.uuid, col.ReadOnly, &conn)?;
|
CollectionUser::save(&user_to_edit.user_uuid, &collection.uuid,
|
||||||
|
col.ReadOnly, col.HidePasswords,
|
||||||
|
&conn)?;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -830,22 +864,13 @@ struct RelationsData {
|
|||||||
fn post_org_import(
|
fn post_org_import(
|
||||||
query: Form<OrgIdData>,
|
query: Form<OrgIdData>,
|
||||||
data: JsonUpcase<ImportData>,
|
data: JsonUpcase<ImportData>,
|
||||||
headers: Headers,
|
headers: AdminHeaders,
|
||||||
conn: DbConn,
|
conn: DbConn,
|
||||||
nt: Notify,
|
nt: Notify,
|
||||||
) -> EmptyResult {
|
) -> EmptyResult {
|
||||||
let data: ImportData = data.into_inner().data;
|
let data: ImportData = data.into_inner().data;
|
||||||
let org_id = query.into_inner().organization_id;
|
let org_id = query.into_inner().organization_id;
|
||||||
|
|
||||||
let org_user = match UserOrganization::find_by_user_and_org(&headers.user.uuid, &org_id, &conn) {
|
|
||||||
Some(user) => user,
|
|
||||||
None => err!("User is not part of the organization"),
|
|
||||||
};
|
|
||||||
|
|
||||||
if org_user.atype < UserOrgType::Admin {
|
|
||||||
err!("Only admins or owners can import into an organization")
|
|
||||||
}
|
|
||||||
|
|
||||||
// Read and create the collections
|
// Read and create the collections
|
||||||
let collections: Vec<_> = data
|
let collections: Vec<_> = data
|
||||||
.Collections
|
.Collections
|
||||||
@@ -866,6 +891,8 @@ fn post_org_import(
|
|||||||
relations.push((relation.Key, relation.Value));
|
relations.push((relation.Key, relation.Value));
|
||||||
}
|
}
|
||||||
|
|
||||||
|
let headers: Headers = headers.into();
|
||||||
|
|
||||||
// Read and create the ciphers
|
// Read and create the ciphers
|
||||||
let ciphers: Vec<_> = data
|
let ciphers: Vec<_> = data
|
||||||
.Ciphers
|
.Ciphers
|
||||||
@@ -901,3 +928,249 @@ fn post_org_import(
|
|||||||
let mut user = headers.user;
|
let mut user = headers.user;
|
||||||
user.update_revision(&conn)
|
user.update_revision(&conn)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[get("/organizations/<org_id>/policies")]
|
||||||
|
fn list_policies(org_id: String, _headers: AdminHeaders, conn: DbConn) -> Json<Value> {
|
||||||
|
let policies = OrgPolicy::find_by_org(&org_id, &conn);
|
||||||
|
let policies_json: Vec<Value> = policies.iter().map(OrgPolicy::to_json).collect();
|
||||||
|
|
||||||
|
Json(json!({
|
||||||
|
"Data": policies_json,
|
||||||
|
"Object": "list",
|
||||||
|
"ContinuationToken": null
|
||||||
|
}))
|
||||||
|
}
|
||||||
|
|
||||||
|
#[get("/organizations/<org_id>/policies/token?<token>")]
|
||||||
|
fn list_policies_token(org_id: String, token: String, conn: DbConn) -> JsonResult {
|
||||||
|
let invite = crate::auth::decode_invite(&token)?;
|
||||||
|
|
||||||
|
let invite_org_id = match invite.org_id {
|
||||||
|
Some(invite_org_id) => invite_org_id,
|
||||||
|
None => err!("Invalid token"),
|
||||||
|
};
|
||||||
|
|
||||||
|
if invite_org_id != org_id {
|
||||||
|
err!("Token doesn't match request organization");
|
||||||
|
}
|
||||||
|
|
||||||
|
// TODO: We receive the invite token as ?token=<>, validate it contains the org id
|
||||||
|
let policies = OrgPolicy::find_by_org(&org_id, &conn);
|
||||||
|
let policies_json: Vec<Value> = policies.iter().map(OrgPolicy::to_json).collect();
|
||||||
|
|
||||||
|
Ok(Json(json!({
|
||||||
|
"Data": policies_json,
|
||||||
|
"Object": "list",
|
||||||
|
"ContinuationToken": null
|
||||||
|
})))
|
||||||
|
}
|
||||||
|
|
||||||
|
#[get("/organizations/<org_id>/policies/<pol_type>")]
|
||||||
|
fn get_policy(org_id: String, pol_type: i32, _headers: AdminHeaders, conn: DbConn) -> JsonResult {
|
||||||
|
let pol_type_enum = match OrgPolicyType::from_i32(pol_type) {
|
||||||
|
Some(pt) => pt,
|
||||||
|
None => err!("Invalid or unsupported policy type"),
|
||||||
|
};
|
||||||
|
|
||||||
|
let policy = match OrgPolicy::find_by_org_and_type(&org_id, pol_type, &conn) {
|
||||||
|
Some(p) => p,
|
||||||
|
None => OrgPolicy::new(org_id, pol_type_enum, "{}".to_string()),
|
||||||
|
};
|
||||||
|
|
||||||
|
Ok(Json(policy.to_json()))
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Deserialize)]
|
||||||
|
struct PolicyData {
|
||||||
|
enabled: bool,
|
||||||
|
#[serde(rename = "type")]
|
||||||
|
_type: i32,
|
||||||
|
data: Value,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[put("/organizations/<org_id>/policies/<pol_type>", data = "<data>")]
|
||||||
|
fn put_policy(org_id: String, pol_type: i32, data: Json<PolicyData>, _headers: AdminHeaders, conn: DbConn) -> JsonResult {
|
||||||
|
let data: PolicyData = data.into_inner();
|
||||||
|
|
||||||
|
let pol_type_enum = match OrgPolicyType::from_i32(pol_type) {
|
||||||
|
Some(pt) => pt,
|
||||||
|
None => err!("Invalid policy type"),
|
||||||
|
};
|
||||||
|
|
||||||
|
let mut policy = match OrgPolicy::find_by_org_and_type(&org_id, pol_type, &conn) {
|
||||||
|
Some(p) => p,
|
||||||
|
None => OrgPolicy::new(org_id, pol_type_enum, "{}".to_string()),
|
||||||
|
};
|
||||||
|
|
||||||
|
policy.enabled = data.enabled;
|
||||||
|
policy.data = serde_json::to_string(&data.data)?;
|
||||||
|
policy.save(&conn)?;
|
||||||
|
|
||||||
|
Ok(Json(policy.to_json()))
|
||||||
|
}
|
||||||
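As a reading aid (not part of the diff): a minimal sketch of a JSON body that `put_policy` above would accept, matching the lowercase field names of `PolicyData`. The function name and the numeric `type` value are illustrative assumptions; the real numeric mapping lives in `OrgPolicyType::from_i32`.

// Illustrative only: a body that deserializes into PolicyData.
// The numeric "type" value is a placeholder, not a documented policy id.
use serde_json::json;

fn example_policy_body() -> serde_json::Value {
    json!({
        "enabled": true,
        "type": 0,
        "data": {}
    })
}
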
#[allow(unused_variables)]
#[get("/organizations/<org_id>/tax")]
fn get_organization_tax(org_id: String, _headers: Headers, _conn: DbConn) -> EmptyResult {
    // Prevent a 404 error, which also causes Javascript errors.
    err!("Only allowed when not self hosted.")
}

#[get("/plans")]
fn get_plans(_headers: Headers, _conn: DbConn) -> Json<Value> {
    Json(json!({
        "Object": "list",
        "Data": [
            {
                "Object": "plan",
                "Type": 0,
                "Product": 0,
                "Name": "Free",
                "IsAnnual": false,
                "NameLocalizationKey": "planNameFree",
                "DescriptionLocalizationKey": "planDescFree",
                "CanBeUsedByBusiness": false,
                "BaseSeats": 2,
                "BaseStorageGb": null,
                "MaxCollections": 2,
                "MaxUsers": 2,
                "HasAdditionalSeatsOption": false,
                "MaxAdditionalSeats": null,
                "HasAdditionalStorageOption": false,
                "MaxAdditionalStorage": null,
                "HasPremiumAccessOption": false,
                "TrialPeriodDays": null,
                "HasSelfHost": false,
                "HasPolicies": false,
                "HasGroups": false,
                "HasDirectory": false,
                "HasEvents": false,
                "HasTotp": false,
                "Has2fa": false,
                "HasApi": false,
                "HasSso": false,
                "UsersGetPremium": false,
                "UpgradeSortOrder": -1,
                "DisplaySortOrder": -1,
                "LegacyYear": null,
                "Disabled": false,
                "StripePlanId": null,
                "StripeSeatPlanId": null,
                "StripeStoragePlanId": null,
                "StripePremiumAccessPlanId": null,
                "BasePrice": 0.0,
                "SeatPrice": 0.0,
                "AdditionalStoragePricePerGb": 0.0,
                "PremiumAccessOptionPrice": 0.0
            }
        ],
        "ContinuationToken": null
    }))
}

#[get("/plans/sales-tax-rates")]
fn get_plans_tax_rates(_headers: Headers, _conn: DbConn) -> Json<Value> {
    // Prevent a 404 error, which also causes Javascript errors.
    Json(json!({
        "Object": "list",
        "Data": [],
        "ContinuationToken": null
    }))
}

#[derive(Deserialize, Debug)]
#[allow(non_snake_case)]
struct OrgImportGroupData {
    Name: String,       // "GroupName"
    ExternalId: String, // "cn=GroupName,ou=Groups,dc=example,dc=com"
    Users: Vec<String>, // ["uid=user,ou=People,dc=example,dc=com"]
}

#[derive(Deserialize, Debug)]
#[allow(non_snake_case)]
struct OrgImportUserData {
    Email: String,      // "user@maildomain.net"
    ExternalId: String, // "uid=user,ou=People,dc=example,dc=com"
    Deleted: bool,
}

#[derive(Deserialize, Debug)]
#[allow(non_snake_case)]
struct OrgImportData {
    Groups: Vec<OrgImportGroupData>,
    OverwriteExisting: bool,
    Users: Vec<OrgImportUserData>,
}

#[post("/organizations/<org_id>/import", data = "<data>")]
fn import(org_id: String, data: JsonUpcase<OrgImportData>, headers: Headers, conn: DbConn) -> EmptyResult {
    let data = data.into_inner().data;

    // TODO: Currently we aren't storing the externalId's anywhere, so we also don't have a way
    // to differentiate between auto-imported users and manually added ones.
    // This means that this endpoint can end up removing users that were added manually by an admin,
    // as opposed to upstream which only removes auto-imported users.

    // User needs to be admin or owner to use the Directory Connector
    match UserOrganization::find_by_user_and_org(&headers.user.uuid, &org_id, &conn) {
        Some(user_org) if user_org.atype >= UserOrgType::Admin => { /* Okay, nothing to do */ }
        Some(_) => err!("User has insufficient permissions to use Directory Connector"),
        None => err!("User not part of organization"),
    };

    for user_data in &data.Users {
        if user_data.Deleted {
            // If user is marked for deletion and it exists, delete it
            if let Some(user_org) = UserOrganization::find_by_email_and_org(&user_data.Email, &org_id, &conn) {
                user_org.delete(&conn)?;
            }

        // If user is not part of the organization, but it exists
        } else if UserOrganization::find_by_email_and_org(&user_data.Email, &org_id, &conn).is_none() {
            if let Some(user) = User::find_by_mail(&user_data.Email, &conn) {
                let user_org_status = if CONFIG.mail_enabled() {
                    UserOrgStatus::Invited as i32
                } else {
                    UserOrgStatus::Accepted as i32 // Automatically mark user as accepted if no email invites
                };

                let mut new_org_user = UserOrganization::new(user.uuid.clone(), org_id.clone());
                new_org_user.access_all = false;
                new_org_user.atype = UserOrgType::User as i32;
                new_org_user.status = user_org_status;

                new_org_user.save(&conn)?;

                if CONFIG.mail_enabled() {
                    let org_name = match Organization::find_by_uuid(&org_id, &conn) {
                        Some(org) => org.name,
                        None => err!("Error looking up organization"),
                    };

                    mail::send_invite(
                        &user_data.Email,
                        &user.uuid,
                        Some(org_id.clone()),
                        Some(new_org_user.uuid),
                        &org_name,
                        Some(headers.user.email.clone()),
                    )?;
                }
            }
        }
    }

    // If this flag is enabled, any user that isn't provided in the Users list will be removed
    // (by default they will be kept unless they have Deleted == true)
    if data.OverwriteExisting {
        for user_org in UserOrganization::find_by_org_and_type(&org_id, UserOrgType::User as i32, &conn) {
            if let Some(user_email) = User::find_by_uuid(&user_org.user_uuid, &conn).map(|u| u.email) {
                if !data.Users.iter().any(|u| u.Email == user_email) {
                    user_org.delete(&conn)?;
                }
            }
        }
    }

    Ok(())
}

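For orientation (not part of the diff): a sketch of a Directory Connector payload that deserializes into the OrgImportData structs above. The concrete values are taken from the struct comments; the helper function name is made up for illustration.

// Illustrative payload for POST /organizations/<org_id>/import.
use serde_json::json;

fn example_import_body() -> serde_json::Value {
    json!({
        "Groups": [{
            "Name": "GroupName",
            "ExternalId": "cn=GroupName,ou=Groups,dc=example,dc=com",
            "Users": ["uid=user,ou=People,dc=example,dc=com"]
        }],
        "Users": [{
            "Email": "user@maildomain.net",
            "ExternalId": "uid=user,ou=People,dc=example,dc=com",
            "Deleted": false
        }],
        "OverwriteExisting": false
    })
}
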
416  src/api/core/sends.rs  Normal file
@@ -0,0 +1,416 @@
use std::{io::Read, path::Path};

use chrono::{DateTime, Duration, Utc};
use multipart::server::{save::SavedData, Multipart, SaveResult};
use rocket::{http::ContentType, Data};
use rocket_contrib::json::Json;
use serde_json::Value;

use crate::{
    api::{ApiResult, EmptyResult, JsonResult, JsonUpcase, Notify, UpdateType},
    auth::{Headers, Host},
    db::{models::*, DbConn},
    CONFIG,
};

const SEND_INACCESSIBLE_MSG: &str = "Send does not exist or is no longer available";

pub fn routes() -> Vec<rocket::Route> {
    routes![
        post_send,
        post_send_file,
        post_access,
        post_access_file,
        put_send,
        delete_send,
        put_remove_password
    ]
}

pub fn start_send_deletion_scheduler(pool: crate::db::DbPool) {
    std::thread::spawn(move || {
        loop {
            if let Ok(conn) = pool.get() {
                info!("Initiating send deletion");
                for send in Send::find_all(&conn) {
                    if chrono::Utc::now().naive_utc() >= send.deletion_date {
                        send.delete(&conn).ok();
                    }
                }
            }

            std::thread::sleep(std::time::Duration::from_secs(3600));
        }
    });
}

#[derive(Deserialize)]
#[allow(non_snake_case)]
pub struct SendData {
    pub Type: i32,
    pub Key: String,
    pub Password: Option<String>,
    pub MaxAccessCount: Option<i32>,
    pub ExpirationDate: Option<DateTime<Utc>>,
    pub DeletionDate: DateTime<Utc>,
    pub Disabled: bool,

    // Data field
    pub Name: String,
    pub Notes: Option<String>,
    pub Text: Option<Value>,
    pub File: Option<Value>,
}

/// Enforces the `Disable Send` policy. A non-owner/admin user belonging to
/// an org with this policy enabled isn't allowed to create new Sends or
/// modify existing ones, but is allowed to delete them.
///
/// Ref: https://bitwarden.com/help/article/policies/#disable-send
fn enforce_disable_send_policy(headers: &Headers, conn: &DbConn) -> EmptyResult {
    let user_uuid = &headers.user.uuid;
    let policy_type = OrgPolicyType::DisableSend;
    if OrgPolicy::is_applicable_to_user(user_uuid, policy_type, conn) {
        err!("Due to an Enterprise Policy, you are only able to delete an existing Send.")
    }
    Ok(())
}

fn create_send(data: SendData, user_uuid: String) -> ApiResult<Send> {
    let data_val = if data.Type == SendType::Text as i32 {
        data.Text
    } else if data.Type == SendType::File as i32 {
        data.File
    } else {
        err!("Invalid Send type")
    };

    let data_str = if let Some(mut d) = data_val {
        d.as_object_mut().and_then(|o| o.remove("Response"));
        serde_json::to_string(&d)?
    } else {
        err!("Send data not provided");
    };

    if data.DeletionDate > Utc::now() + Duration::days(31) {
        err!(
            "You cannot have a Send with a deletion date that far into the future. Adjust the Deletion Date to a value less than 31 days from now and try again."
        );
    }

    let mut send = Send::new(data.Type, data.Name, data_str, data.Key, data.DeletionDate.naive_utc());
    send.user_uuid = Some(user_uuid);
    send.notes = data.Notes;
    send.max_access_count = data.MaxAccessCount;
    send.expiration_date = data.ExpirationDate.map(|d| d.naive_utc());
    send.disabled = data.Disabled;
    send.atype = data.Type;

    send.set_password(data.Password.as_deref());

    Ok(send)
}

#[post("/sends", data = "<data>")]
fn post_send(data: JsonUpcase<SendData>, headers: Headers, conn: DbConn, nt: Notify) -> JsonResult {
    enforce_disable_send_policy(&headers, &conn)?;

    let data: SendData = data.into_inner().data;

    if data.Type == SendType::File as i32 {
        err!("File sends should use /api/sends/file")
    }

    let mut send = create_send(data, headers.user.uuid.clone())?;
    send.save(&conn)?;
    nt.send_user_update(UpdateType::SyncSendCreate, &headers.user);

    Ok(Json(send.to_json()))
}

#[post("/sends/file", format = "multipart/form-data", data = "<data>")]
fn post_send_file(data: Data, content_type: &ContentType, headers: Headers, conn: DbConn, nt: Notify) -> JsonResult {
    enforce_disable_send_policy(&headers, &conn)?;

    let boundary = content_type.params().next().expect("No boundary provided").1;

    let mut mpart = Multipart::with_body(data.open(), boundary);

    // First entry is the SendData JSON
    let mut model_entry = match mpart.read_entry()? {
        Some(e) if &*e.headers.name == "model" => e,
        Some(_) => err!("Invalid entry name"),
        None => err!("No model entry present"),
    };

    let mut buf = String::new();
    model_entry.data.read_to_string(&mut buf)?;
    let data = serde_json::from_str::<crate::util::UpCase<SendData>>(&buf)?;

    // Get the file length and add an extra 10% to avoid issues
    const SIZE_110_MB: u64 = 115_343_360;

    let size_limit = match CONFIG.user_attachment_limit() {
        Some(0) => err!("File uploads are disabled"),
        Some(limit_kb) => {
            let left = (limit_kb * 1024) - Attachment::size_by_user(&headers.user.uuid, &conn);
            if left <= 0 {
                err!("Attachment size limit reached! Delete some files to open space")
            }
            std::cmp::Ord::max(left as u64, SIZE_110_MB)
        }
        None => SIZE_110_MB,
    };
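Side note (an assumption about how the constant was derived, not stated in the diff): 115_343_360 reads as 100 MiB plus 10% headroom.

// 100 MiB = 100 * 1024 * 1024 = 104_857_600 bytes; adding 10% gives
// 104_857_600 + 10_485_760 = 115_343_360 bytes, matching SIZE_110_MB above.
const BASE_100_MIB: u64 = 100 * 1024 * 1024;
const BASE_PLUS_TEN_PERCENT: u64 = BASE_100_MIB + BASE_100_MIB / 10; // 115_343_360
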
    // Create the Send
    let mut send = create_send(data.data, headers.user.uuid.clone())?;
    let file_id: String = data_encoding::HEXLOWER.encode(&crate::crypto::get_random(vec![0; 32]));

    if send.atype != SendType::File as i32 {
        err!("Send content is not a file");
    }

    let file_path = Path::new(&CONFIG.sends_folder()).join(&send.uuid).join(&file_id);

    // Read the data entry and save the file
    let mut data_entry = match mpart.read_entry()? {
        Some(e) if &*e.headers.name == "data" => e,
        Some(_) => err!("Invalid entry name"),
        None => err!("No model entry present"),
    };

    let size = match data_entry
        .data
        .save()
        .memory_threshold(0)
        .size_limit(size_limit)
        .with_path(&file_path)
    {
        SaveResult::Full(SavedData::File(_, size)) => size as i32,
        SaveResult::Full(other) => {
            std::fs::remove_file(&file_path).ok();
            err!(format!("Attachment is not a file: {:?}", other));
        }
        SaveResult::Partial(_, reason) => {
            std::fs::remove_file(&file_path).ok();
            err!(format!("Attachment size limit exceeded with this file: {:?}", reason));
        }
        SaveResult::Error(e) => {
            std::fs::remove_file(&file_path).ok();
            err!(format!("Error: {:?}", e));
        }
    };

    // Set ID and sizes
    let mut data_value: Value = serde_json::from_str(&send.data)?;
    if let Some(o) = data_value.as_object_mut() {
        o.insert(String::from("Id"), Value::String(file_id));
        o.insert(String::from("Size"), Value::Number(size.into()));
        o.insert(
            String::from("SizeName"),
            Value::String(crate::util::get_display_size(size)),
        );
    }
    send.data = serde_json::to_string(&data_value)?;

    // Save the changes in the database
    send.save(&conn)?;
    nt.send_user_update(UpdateType::SyncSendCreate, &headers.user);

    Ok(Json(send.to_json()))
}

#[derive(Deserialize)]
#[allow(non_snake_case)]
pub struct SendAccessData {
    pub Password: Option<String>,
}

#[post("/sends/access/<access_id>", data = "<data>")]
fn post_access(access_id: String, data: JsonUpcase<SendAccessData>, conn: DbConn) -> JsonResult {
    let mut send = match Send::find_by_access_id(&access_id, &conn) {
        Some(s) => s,
        None => err_code!(SEND_INACCESSIBLE_MSG, 404),
    };

    if let Some(max_access_count) = send.max_access_count {
        if send.access_count >= max_access_count {
            err_code!(SEND_INACCESSIBLE_MSG, 404);
        }
    }

    if let Some(expiration) = send.expiration_date {
        if Utc::now().naive_utc() >= expiration {
            err_code!(SEND_INACCESSIBLE_MSG, 404)
        }
    }

    if Utc::now().naive_utc() >= send.deletion_date {
        err_code!(SEND_INACCESSIBLE_MSG, 404)
    }

    if send.disabled {
        err_code!(SEND_INACCESSIBLE_MSG, 404)
    }

    if send.password_hash.is_some() {
        match data.into_inner().data.Password {
            Some(ref p) if send.check_password(p) => { /* Nothing to do here */ }
            Some(_) => err!("Invalid password."),
            None => err_code!("Password not provided", 401),
        }
    }

    // Files are incremented during the download
    if send.atype == SendType::Text as i32 {
        send.access_count += 1;
    }

    send.save(&conn)?;

    Ok(Json(send.to_json_access()))
}

#[post("/sends/<send_id>/access/file/<file_id>", data = "<data>")]
fn post_access_file(
    send_id: String,
    file_id: String,
    data: JsonUpcase<SendAccessData>,
    host: Host,
    conn: DbConn,
) -> JsonResult {
    let mut send = match Send::find_by_uuid(&send_id, &conn) {
        Some(s) => s,
        None => err_code!(SEND_INACCESSIBLE_MSG, 404),
    };

    if let Some(max_access_count) = send.max_access_count {
        if send.access_count >= max_access_count {
            err_code!(SEND_INACCESSIBLE_MSG, 404)
        }
    }

    if let Some(expiration) = send.expiration_date {
        if Utc::now().naive_utc() >= expiration {
            err_code!(SEND_INACCESSIBLE_MSG, 404)
        }
    }

    if Utc::now().naive_utc() >= send.deletion_date {
        err_code!(SEND_INACCESSIBLE_MSG, 404)
    }

    if send.disabled {
        err_code!(SEND_INACCESSIBLE_MSG, 404)
    }

    if send.password_hash.is_some() {
        match data.into_inner().data.Password {
            Some(ref p) if send.check_password(p) => { /* Nothing to do here */ }
            Some(_) => err!("Invalid password."),
            None => err_code!("Password not provided", 401),
        }
    }

    send.access_count += 1;

    send.save(&conn)?;

    Ok(Json(json!({
        "Object": "send-fileDownload",
        "Id": file_id,
        "Url": format!("{}/sends/{}/{}", &host.host, send_id, file_id)
    })))
}

#[put("/sends/<id>", data = "<data>")]
fn put_send(id: String, data: JsonUpcase<SendData>, headers: Headers, conn: DbConn, nt: Notify) -> JsonResult {
    enforce_disable_send_policy(&headers, &conn)?;

    let data: SendData = data.into_inner().data;

    let mut send = match Send::find_by_uuid(&id, &conn) {
        Some(s) => s,
        None => err!("Send not found"),
    };

    if send.user_uuid.as_ref() != Some(&headers.user.uuid) {
        err!("Send is not owned by user")
    }

    if send.atype != data.Type {
        err!("Sends can't change type")
    }

    // When updating a file Send, we receive nulls in the File field, as it's immutable,
    // so we only need to update the data field in the Text case
    if data.Type == SendType::Text as i32 {
        let data_str = if let Some(mut d) = data.Text {
            d.as_object_mut().and_then(|d| d.remove("Response"));
            serde_json::to_string(&d)?
        } else {
            err!("Send data not provided");
        };
        send.data = data_str;
    }

    if data.DeletionDate > Utc::now() + Duration::days(31) {
        err!(
            "You cannot have a Send with a deletion date that far into the future. Adjust the Deletion Date to a value less than 31 days from now and try again."
        );
    }
    send.name = data.Name;
    send.akey = data.Key;
    send.deletion_date = data.DeletionDate.naive_utc();
    send.notes = data.Notes;
    send.max_access_count = data.MaxAccessCount;
    send.expiration_date = data.ExpirationDate.map(|d| d.naive_utc());
    send.disabled = data.Disabled;

    // Only change the value if it's present
    if let Some(password) = data.Password {
        send.set_password(Some(&password));
    }

    send.save(&conn)?;
    nt.send_user_update(UpdateType::SyncSendUpdate, &headers.user);

    Ok(Json(send.to_json()))
}

#[delete("/sends/<id>")]
fn delete_send(id: String, headers: Headers, conn: DbConn, nt: Notify) -> EmptyResult {
    let send = match Send::find_by_uuid(&id, &conn) {
        Some(s) => s,
        None => err!("Send not found"),
    };

    if send.user_uuid.as_ref() != Some(&headers.user.uuid) {
        err!("Send is not owned by user")
    }

    send.delete(&conn)?;
    nt.send_user_update(UpdateType::SyncSendDelete, &headers.user);

    Ok(())
}

#[put("/sends/<id>/remove-password")]
fn put_remove_password(id: String, headers: Headers, conn: DbConn, nt: Notify) -> JsonResult {
    enforce_disable_send_policy(&headers, &conn)?;

    let mut send = match Send::find_by_uuid(&id, &conn) {
        Some(s) => s,
        None => err!("Send not found"),
    };

    if send.user_uuid.as_ref() != Some(&headers.user.uuid) {
        err!("Send is not owned by user")
    }

    send.set_password(None);
    send.save(&conn)?;
    nt.send_user_update(UpdateType::SyncSendUpdate, &headers.user);

    Ok(Json(send.to_json()))
}

@@ -2,13 +2,16 @@ use data_encoding::BASE32;
use rocket::Route;
use rocket_contrib::json::Json;

-use crate::api::core::two_factor::_generate_recover_code;
-use crate::api::{EmptyResult, JsonResult, JsonUpcase, NumberOrString, PasswordData};
-use crate::auth::Headers;
-use crate::crypto;
-use crate::db::{
-    models::{TwoFactor, TwoFactorType},
-    DbConn,
-};
+use crate::{
+    api::{
+        core::two_factor::_generate_recover_code, EmptyResult, JsonResult, JsonUpcase, NumberOrString, PasswordData,
+    },
+    auth::{ClientIp, Headers},
+    crypto,
+    db::{
+        models::{TwoFactor, TwoFactorType},
+        DbConn,
+    },
+};

pub use crate::config::CONFIG;
@@ -20,6 +23,7 @@ pub fn routes() -> Vec<Route> {
        activate_authenticator_put,
    ]
}

#[post("/two-factor/get-authenticator", data = "<data>")]
fn generate_authenticator(data: JsonUpcase<PasswordData>, headers: Headers, conn: DbConn) -> JsonResult {
    let data: PasswordData = data.into_inner().data;
@@ -53,7 +57,12 @@ struct EnableAuthenticatorData {
}

#[post("/two-factor/authenticator", data = "<data>")]
-fn activate_authenticator(data: JsonUpcase<EnableAuthenticatorData>, headers: Headers, conn: DbConn) -> JsonResult {
+fn activate_authenticator(
+    data: JsonUpcase<EnableAuthenticatorData>,
+    headers: Headers,
+    ip: ClientIp,
+    conn: DbConn,
+) -> JsonResult {
    let data: EnableAuthenticatorData = data.into_inner().data;
    let password_hash = data.MasterPasswordHash;
    let key = data.Key;
@@ -76,7 +85,7 @@ fn activate_authenticator(data: JsonUpcase<EnableAuthenticatorData>, headers: He
    }

    // Validate the token provided with the key, and save new twofactor
-    validate_totp_code(&user.uuid, token, &key.to_uppercase(), &conn)?;
+    validate_totp_code(&user.uuid, token, &key.to_uppercase(), &ip, &conn)?;

    _generate_recover_code(&mut user, &conn);

@@ -88,22 +97,32 @@ fn activate_authenticator(data: JsonUpcase<EnableAuthenticatorData>, headers: He
}

#[put("/two-factor/authenticator", data = "<data>")]
-fn activate_authenticator_put(data: JsonUpcase<EnableAuthenticatorData>, headers: Headers, conn: DbConn) -> JsonResult {
-    activate_authenticator(data, headers, conn)
+fn activate_authenticator_put(
+    data: JsonUpcase<EnableAuthenticatorData>,
+    headers: Headers,
+    ip: ClientIp,
+    conn: DbConn,
+) -> JsonResult {
+    activate_authenticator(data, headers, ip, conn)
}

-pub fn validate_totp_code_str(user_uuid: &str, totp_code: &str, secret: &str, conn: &DbConn) -> EmptyResult {
+pub fn validate_totp_code_str(
+    user_uuid: &str,
+    totp_code: &str,
+    secret: &str,
+    ip: &ClientIp,
+    conn: &DbConn,
+) -> EmptyResult {
    let totp_code: u64 = match totp_code.parse() {
        Ok(code) => code,
        _ => err!("TOTP code is not a number"),
    };

-    validate_totp_code(user_uuid, totp_code, secret, &conn)
+    validate_totp_code(user_uuid, totp_code, secret, ip, &conn)
}

-pub fn validate_totp_code(user_uuid: &str, totp_code: u64, secret: &str, conn: &DbConn) -> EmptyResult {
+pub fn validate_totp_code(user_uuid: &str, totp_code: u64, secret: &str, ip: &ClientIp, conn: &DbConn) -> EmptyResult {
    use oath::{totp_raw_custom_time, HashType};
-    use std::time::{UNIX_EPOCH, SystemTime};

    let decoded_secret = match BASE32.decode(secret.as_bytes()) {
        Ok(s) => s,
@@ -116,24 +135,23 @@ pub fn validate_totp_code(user_uuid: &str, totp_code: u64, secret: &str, conn: &
    };

    // Get the current system time in UNIX Epoch (UTC)
-    let current_time: u64 = SystemTime::now().duration_since(UNIX_EPOCH)
-        .expect("Earlier than 1970-01-01 00:00:00 UTC").as_secs();
+    let current_time = chrono::Utc::now();
+    let current_timestamp = current_time.timestamp();

    // The amount of steps back and forward in time
    // Also check if we need to disable time drifted TOTP codes.
    // If that is the case, we set the steps to 0 so only the current TOTP is valid.
-    let steps = if CONFIG.authenticator_disable_time_drift() { 0 } else { 1 };
+    let steps: i64 = if CONFIG.authenticator_disable_time_drift() { 0 } else { 1 };

    for step in -steps..=steps {
-        let time_step = (current_time / 30) as i32 + step;
+        let time_step = current_timestamp / 30i64 + step;
        // We need to calculate the time offsite and cast it as an i128.
        // Else we can't do math with it on a default u64 variable.
-        let time_offset: i128 = (step * 30).into();
-        let generated = totp_raw_custom_time(&decoded_secret, 6, 0, 30, (current_time as i128 + time_offset) as u64, &HashType::SHA1);
+        let time = (current_timestamp + step * 30i64) as u64;
+        let generated = totp_raw_custom_time(&decoded_secret, 6, 0, 30, time, &HashType::SHA1);

        // Check the the given code equals the generated and if the time_step is larger then the one last used.
-        if generated == totp_code && time_step > twofactor.last_used {
+        if generated == totp_code && time_step > twofactor.last_used as i64 {

            // If the step does not equals 0 the time is drifted either server or client side.
            if step != 0 {
                info!("TOTP Time drift detected. The step offset is {}", step);
@@ -141,15 +159,26 @@ pub fn validate_totp_code(user_uuid: &str, totp_code: u64, secret: &str, conn: &

            // Save the last used time step so only totp time steps higher then this one are allowed.
            // This will also save a newly created twofactor if the code is correct.
-            twofactor.last_used = time_step;
+            twofactor.last_used = time_step as i32;
            twofactor.save(&conn)?;
            return Ok(());
-        } else if generated == totp_code && time_step <= twofactor.last_used {
-            warn!("This or a TOTP code within {} steps back and forward has already been used!", steps);
-            err!("Invalid TOTP Code!");
+        } else if generated == totp_code && time_step <= twofactor.last_used as i64 {
+            warn!(
+                "This or a TOTP code within {} steps back and forward has already been used!",
+                steps
+            );
+            err!(format!(
+                "Invalid TOTP code! Server time: {} IP: {}",
+                current_time.format("%F %T UTC"),
+                ip.ip
+            ));
        }
    }

    // Else no valide code received, deny access
-    err!("Invalid TOTP code!");
+    err!(format!(
+        "Invalid TOTP code! Server time: {} IP: {}",
+        current_time.format("%F %T UTC"),
+        ip.ip
+    ));
}

Some files were not shown because too many files have changed in this diff.