Mirror of https://github.com/dani-garcia/vaultwarden.git (synced 2025-09-10 18:55:57 +03:00)
Compare commits
668 Commits
668 commits, listed by SHA1 only, from 692ed81306 (oldest) to 2c2276c5bb (newest).
@@ -9,10 +9,6 @@ data
 .idea
 *.iml
 
-# Git files
-.git
-.gitignore
-
 # Documentation
 *.md
 
.env (deleted file, 40 lines)
@@ -1,40 +0,0 @@
-## Bitwarden_RS Configuration File
-## Uncomment any of the following lines to change the defaults
-
-## Main data folder
-# DATA_FOLDER=data
-
-## Individual folders, these override %DATA_FOLDER%
-# DATABASE_URL=data/db.sqlite3
-# RSA_KEY_FILENAME=data/rsa_key
-# ICON_CACHE_FOLDER=data/icon_cache
-# ATTACHMENTS_FOLDER=data/attachments
-
-## Web vault settings
-# WEB_VAULT_FOLDER=web-vault/
-# WEB_VAULT_ENABLED=true
-
-## Controls if new users can register
-# SIGNUPS_ALLOWED=true
-
-## Use a local favicon extractor
-## Set to false to use bitwarden's official icon servers
-## Set to true to use the local version, which is not as smart,
-## but it doesn't send the cipher domains to bitwarden's servers
-# LOCAL_ICON_EXTRACTOR=false
-
-## Controls the PBKDF2 password iterations to apply on the server
-## The change only applies when the password is changed
-# PASSWORD_ITERATIONS=100000
-
-## Domain settings
-## The domain must match the address from where you access the server
-## Unless you are using U2F, or having problems with attachments not downloading, there is no need to change this
-## For U2F to work, the server must use HTTPS, you can use Let's Encrypt for free certs
-# DOMAIN=https://bw.domain.tld:8443
-
-## Rocket specific settings, check Rocket documentation to learn more
-# ROCKET_ENV=staging
-# ROCKET_ADDRESS=0.0.0.0 # Enable this to test mobile app
-# ROCKET_PORT=8000
-# ROCKET_TLS={certs="/path/to/certs.pem",key="/path/to/key.pem"}
.env.template (new file, 152 lines)
@@ -0,0 +1,152 @@
+## Bitwarden_RS Configuration File
+## Uncomment any of the following lines to change the defaults
+
+## Main data folder
+# DATA_FOLDER=data
+
+## Database URL
+## When using SQLite, this is the path to the DB file, default to %DATA_FOLDER%/db.sqlite3
+## When using MySQL, this is the URL to the DB, including username and password:
+## Format: mysql://[user[:password]@]host/database_name
+# DATABASE_URL=data/db.sqlite3
+
+## Individual folders, these override %DATA_FOLDER%
+# RSA_KEY_FILENAME=data/rsa_key
+# ICON_CACHE_FOLDER=data/icon_cache
+# ATTACHMENTS_FOLDER=data/attachments
+
+## Templates data folder, by default uses embedded templates
+## Check source code to see the format
+# TEMPLATES_FOLDER=/path/to/templates
+## Automatically reload the templates for every request, slow, use only for development
+# RELOAD_TEMPLATES=false
+
+## Cache time-to-live for successfully obtained icons, in seconds (0 is "forever")
+# ICON_CACHE_TTL=2592000
+## Cache time-to-live for icons which weren't available, in seconds (0 is "forever")
+# ICON_CACHE_NEGTTL=259200
+
+## Web vault settings
+# WEB_VAULT_FOLDER=web-vault/
+# WEB_VAULT_ENABLED=true
+
+## Enables websocket notifications
+# WEBSOCKET_ENABLED=false
+
+## Controls the WebSocket server address and port
+# WEBSOCKET_ADDRESS=0.0.0.0
+# WEBSOCKET_PORT=3012
+
+## Enable extended logging
+## This shows timestamps and allows logging to file and to syslog
+### To enable logging to file, use the LOG_FILE env variable
+### To enable syslog, use the USE_SYSLOG env variable
+# EXTENDED_LOGGING=true
+
+## Logging to file
+## This requires extended logging
+## It's recommended to also set 'ROCKET_CLI_COLORS=off'
+# LOG_FILE=/path/to/log
+
+## Logging to Syslog
+## This requires extended logging
+## It's recommended to also set 'ROCKET_CLI_COLORS=off'
+# USE_SYSLOG=false
+
+## Log level
+## Change the verbosity of the log output
+## Valid values are "trace", "debug", "info", "warn", "error" and "off"
+## This requires extended logging
+# LOG_LEVEL=Info
+
+## Enable WAL for the DB
+## Set to false to avoid enabling WAL during startup.
+## Note that if the DB already has WAL enabled, you will also need to disable WAL in the DB,
+## this setting only prevents bitwarden_rs from automatically enabling it on start.
+## Please read project wiki page about this setting first before changing the value as it can
+## cause performance degradation or might render the service unable to start.
+# ENABLE_DB_WAL=true
+
+## Disable icon downloading
+## Set to true to disable icon downloading, this would still serve icons from $ICON_CACHE_FOLDER,
+## but it won't produce any external network request. Needs to set $ICON_CACHE_TTL to 0,
+## otherwise it will delete them and they won't be downloaded again.
+# DISABLE_ICON_DOWNLOAD=false
+
+## Icon download timeout
+## Configure the timeout value when downloading the favicons.
+## The default is 10 seconds, but this could be too low on slower network connections
+# ICON_DOWNLOAD_TIMEOUT=10
+
+## Icon blacklist Regex
+## Any domains or IPs that match this regex won't be fetched by the icon service.
+## Useful to hide other servers in the local network. Check the WIKI for more details
+# ICON_BLACKLIST_REGEX=192\.168\.1\.[0-9].*^
+
+## Disable 2FA remember
+## Enabling this would force the users to use a second factor to login every time.
+## Note that the checkbox would still be present, but ignored.
+# DISABLE_2FA_REMEMBER=false
+
+## Controls if new users can register
+# SIGNUPS_ALLOWED=true
+
+## Token for the admin interface, preferably use a long random string
+## One option is to use 'openssl rand -base64 48'
+## If not set, the admin panel is disabled
+# ADMIN_TOKEN=Vy2VyYTTsKPv8W5aEOWUbB/Bt3DEKePbHmI4m9VcemUMS2rEviDowNAFqYi1xjmp
+# DISABLE_ADMIN_TOKEN=false
+
+## Allows org admins to invite users, even when signups are disabled
+# INVITATIONS_ALLOWED=true
+
+## Controls the PBKDF2 password iterations to apply on the server
+## The change only applies when the password is changed
+# PASSWORD_ITERATIONS=100000
+
+## Whether password hint should be sent into the error response when the client requests it
+# SHOW_PASSWORD_HINT=true
+
+## Domain settings
+## The domain must match the address from where you access the server
+## It's recommended to configure this value, otherwise certain functionality might not work,
+## like attachment downloads, email links and U2F.
+## For U2F to work, the server must use HTTPS, you can use Let's Encrypt for free certs
+# DOMAIN=https://bw.domain.tld:8443
+
+## Yubico (Yubikey) Settings
+## Set your Client ID and Secret Key for Yubikey OTP
+## You can generate it here: https://upgrade.yubico.com/getapikey/
+## You can optionally specify a custom OTP server
+# YUBICO_CLIENT_ID=11111
+# YUBICO_SECRET_KEY=AAAAAAAAAAAAAAAAAAAAAAAA
+# YUBICO_SERVER=http://yourdomain.com/wsapi/2.0/verify
+
+## Duo Settings
+## You need to configure all options to enable global Duo support, otherwise users would need to configure it themselves
+## Create an account and protect an application as mentioned in this link (only the first step, not the rest):
+## https://help.bitwarden.com/article/setup-two-step-login-duo/#create-a-duo-security-account
+## Then set the following options, based on the values obtained from the last step:
+# DUO_IKEY=<Integration Key>
+# DUO_SKEY=<Secret Key>
+# DUO_HOST=<API Hostname>
+## After that, you should be able to follow the rest of the guide linked above,
+## ignoring the fields that ask for the values that you already configured beforehand.
+
+## Rocket specific settings, check Rocket documentation to learn more
+# ROCKET_ENV=staging
+# ROCKET_ADDRESS=0.0.0.0 # Enable this to test mobile app
+# ROCKET_PORT=8000
+# ROCKET_TLS={certs="/path/to/certs.pem",key="/path/to/key.pem"}
+
+## Mail specific settings, set SMTP_HOST and SMTP_FROM to enable the mail service.
+## To make sure the email links are pointing to the correct host, set the DOMAIN variable.
+## Note: if SMTP_USERNAME is specified, SMTP_PASSWORD is mandatory
+# SMTP_HOST=smtp.domain.tld
+# SMTP_FROM=bitwarden-rs@domain.tld
+# SMTP_FROM_NAME=Bitwarden_RS
+# SMTP_PORT=587
+# SMTP_SSL=true
+# SMTP_USERNAME=username
+# SMTP_PASSWORD=password
+# SMTP_AUTH_MECHANISM="Plain"
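The template is meant to be copied into place and edited; a minimal sketch of that workflow in sh (the `openssl` invocation is the one the template's own ADMIN_TOKEN comment suggests; the copy step is an assumption based on the `.env` to `.env.template` rename and the `.gitignore` change below):

```sh
# Copy the tracked template to the untracked live config file
cp .env.template .env

# Generate a long random admin token, as the ADMIN_TOKEN comment suggests
openssl rand -base64 48

# Then uncomment ADMIN_TOKEN in .env and paste the generated value.
# Real environment variables still override anything set in .env.
```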
.gitignore (vendored, 2 lines changed)
@@ -10,7 +10,7 @@ data
 *.iml
 
 # Environment file
-# .env
+.env
 
 # Web vault
 web-vault
.hadolint.yaml (new file, 7 lines)
@@ -0,0 +1,7 @@
+ignored:
+  # disable explicit version for apt install
+  - DL3008
+  # disable explicit version for apk install
+  - DL3018
+trustedRegistries:
+  - docker.io
.travis.yml (new file, 20 lines)
@@ -0,0 +1,20 @@
+dist: xenial
+
+env:
+  global:
+    - HADOLINT_VERSION=1.17.1
+
+language: rust
+rust: nightly
+cache: cargo
+
+before_install:
+  - sudo curl -L https://github.com/hadolint/hadolint/releases/download/v$HADOLINT_VERSION/hadolint-$(uname -s)-$(uname -m) -o /usr/local/bin/hadolint
+  - sudo chmod +rx /usr/local/bin/hadolint
+
+# Nothing to install
+install: true
+script:
+  - git ls-files --exclude='Dockerfile*' --ignored | xargs --max-lines=1 hadolint
+  - cargo build --features "sqlite"
+  - cargo build --features "mysql"
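The same Dockerfile lint can be reproduced locally. This sketch assumes `hadolint` is already on `PATH` (the Travis job above downloads a pinned release first) and reuses the exact invocation from the `script` section:

```sh
# Lint every tracked Dockerfile, one hadolint run per file;
# rules DL3008 and DL3018 are suppressed via .hadolint.yaml
git ls-files --exclude='Dockerfile*' --ignored | xargs --max-lines=1 hadolint
```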
BUILD.md (deleted file, 69 lines)
@@ -1,69 +0,0 @@
-# Build instructions
-
-## Dependencies
-- `Rust nightly` (strongly recommended to use [rustup](https://rustup.rs/))
-- `OpenSSL` (should be available in path, install through your system's package manager or use the [prebuilt binaries](https://wiki.openssl.org/index.php/Binaries))
-- `NodeJS` (required to build the web-vault, install through your system's package manager or use the [prebuilt binaries](https://nodejs.org/en/download/))
-
-## Run/Compile
-```sh
-# Compile and run
-cargo run
-# or just compile (binary located in target/release/bitwarden_rs)
-cargo build --release
-```
-
-When run, the server is accessible at [http://localhost:80](http://localhost:80).
-
-### Install the web-vault
-Download the latest official release from the [releases page](https://github.com/bitwarden/web/releases) and extract it.
-
-Modify `web-vault/settings.Production.json` to look like this:
-```json
-{
-  "appSettings": {
-    "apiUri": "/api",
-    "identityUri": "/identity",
-    "iconsUri": "/icons",
-    "stripeKey": "",
-    "braintreeKey": ""
-  }
-}
-```
-
-Then, run the following from the `web-vault` directory:
-```sh
-npm install
-npx gulp dist:selfHosted
-```
-
-Finally copy the contents of the `web-vault/dist` folder into the `bitwarden_rs/web-vault` folder.
-
-# Configuration
-The available configuration options are documented in the default `.env` file, and they can be modified by uncommenting the desired options in that file or by setting their respective environment variables. Look at the README file for the main configuration options available.
-
-Note: the environment variables override the values set in the `.env` file.
-
-## How to recreate database schemas (for developers)
-Install diesel-cli with cargo:
-```sh
-cargo install diesel_cli --no-default-features --features sqlite-bundled
-```
-
-Make sure that the correct path to the database is in the `.env` file.
-
-If you want to modify the schemas, create a new migration with:
-```
-diesel migration generate <name>
-```
-
-Modify the *.sql files, making sure that any changes are reverted in the down.sql file.
-
-Apply the migrations and save the generated schemas as follows:
-```sh
-diesel migration redo
-
-# This step should be done automatically when using diesel-cli > 1.3.0
-# diesel print-schema > src/db/schema.rs
-```
Cargo.lock (generated, 2999 lines changed): diff suppressed because it is too large.
Cargo.toml (103 lines changed)
@@ -1,66 +1,117 @@
 [package]
 name = "bitwarden_rs"
-version = "0.10.0"
+version = "1.0.0"
 authors = ["Daniel García <dani-garcia@users.noreply.github.com>"]
+edition = "2018"
+
+repository = "https://github.com/dani-garcia/bitwarden_rs"
+readme = "README.md"
+license = "GPL-3.0-only"
+publish = false
+build = "build.rs"
+
+[features]
+# Empty to keep compatibility, prefer to set USE_SYSLOG=true
+enable_syslog = []
+mysql = ["diesel/mysql", "diesel_migrations/mysql"]
+sqlite = ["diesel/sqlite", "diesel_migrations/sqlite", "libsqlite3-sys"]
+
+[target."cfg(not(windows))".dependencies]
+syslog = "4.0.1"
 
 [dependencies]
 # Web framework for nightly with a focus on ease-of-use, expressibility, and speed.
-rocket = { version = "0.3.14", features = ["tls"] }
-rocket_codegen = "0.3.14"
-rocket_contrib = "0.3.14"
+rocket = { version = "0.5.0-dev", features = ["tls"], default-features = false }
+rocket_contrib = "0.5.0-dev"
 
 # HTTP client
-reqwest = "0.8.6"
+reqwest = "0.9.19"
 
 # multipart/form-data support
-multipart = "0.14.2"
+multipart = { version = "0.16.1", features = ["server"], default-features = false }
+
+# WebSockets library
+ws = "0.9.0"
+
+# MessagePack library
+rmpv = "0.4.0"
+
+# Concurrent hashmap implementation
+chashmap = "2.2.2"
 
 # A generic serialization/deserialization framework
-serde = "1.0.70"
-serde_derive = "1.0.70"
-serde_json = "1.0.22"
+serde = "1.0.99"
+serde_derive = "1.0.99"
+serde_json = "1.0.40"
+
+# Logging
+log = "0.4.8"
+fern = { version = "0.5.8", features = ["syslog-4"] }
 
 # A safe, extensible ORM and Query builder
-diesel = { version = "1.3.2", features = ["sqlite", "chrono", "r2d2"] }
-diesel_migrations = { version = "1.3.0", features = ["sqlite"] }
+diesel = { version = "1.4.2", features = [ "chrono", "r2d2"] }
+diesel_migrations = "1.4.0"
 
 # Bundled SQLite
-libsqlite3-sys = { version = "0.9.1", features = ["bundled"] }
+libsqlite3-sys = { version = "0.12.0", features = ["bundled"], optional = true }
 
 # Crypto library
-ring = { version = "= 0.11.0", features = ["rsa_signing"] }
+ring = "0.14.6"
 
 # UUID generation
-uuid = { version = "0.6.5", features = ["v4"] }
+uuid = { version = "0.7.4", features = ["v4"] }
 
 # Date and time library for Rust
-chrono = "0.4.4"
+chrono = "0.4.7"
 
 # TOTP library
 oath = "0.10.2"
 
 # Data encoding library
-data-encoding = "2.1.1"
+data-encoding = "2.1.2"
 
 # JWT library
-jsonwebtoken = "= 4.0.1"
+jsonwebtoken = "6.0.1"
 
 # U2F library
-u2f = "0.1.2"
+u2f = "0.1.6"
+
+# Yubico Library
+yubico = { version = "0.6.1", features = ["online", "online-tokio"], default-features = false }
 
 # A `dotenv` implementation for Rust
-dotenv = { version = "0.13.0", default-features = false }
+dotenv = { version = "0.14.1", default-features = false }
 
 # Lazy static macro
-lazy_static = "1.0.1"
+lazy_static = "1.3.0"
+
+# More derives
+derive_more = "0.15.0"
 
 # Numerical libraries
-num-traits = "0.2.5"
-num-derive = "0.2.2"
+num-traits = "0.2.8"
+num-derive = "0.2.5"
+
+# Email libraries
+lettre = "0.9.2"
+lettre_email = "0.9.2"
+native-tls = "0.2.3"
+quoted_printable = "0.4.1"
+
+# Template library
+handlebars = "2.0.1"
+
+# For favicon extraction from main website
+soup = "0.4.1"
+regex = "1.2.1"
+
+# URL encoding library
+percent-encoding = "2.1.0"
 
 [patch.crates-io]
-# Make jwt use ring 0.11, to match rocket
-jsonwebtoken = { path = "libs/jsonwebtoken" }
+# Add support for Timestamp type
+rmp = { git = 'https://github.com/dani-garcia/msgpack-rust' }
 
-# Version 0.1.2 from crates.io lacks a commit that fixes a certificate error
-u2f = { git = 'https://github.com/wisespace-io/u2f-rs', rev = '193de35093a44' }
+# Use newest ring
+rocket = { git = 'https://github.com/SergioBenitez/Rocket', rev = 'dbcb0a75b9556763ac3ab708f40c8f8ed75f1a1e' }
+rocket_contrib = { git = 'https://github.com/SergioBenitez/Rocket', rev = 'dbcb0a75b9556763ac3ab708f40c8f8ed75f1a1e' }
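The new `[features]` section turns the database backend into a build-time choice: `sqlite` pulls in the now-optional bundled `libsqlite3-sys`, while `mysql` switches diesel over. A sketch of the resulting build commands, using the same feature flags the CI files below use:

```sh
# Build with the previous default, the bundled SQLite backend
cargo build --release --features sqlite

# Or build against MySQL instead
cargo build --release --features mysql

# Enabling both backends, or neither, fails the build
# via the compile_error! guards in build.rs (added below).
```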
Dockerfile (deleted file, 91 lines)
@@ -1,91 +0,0 @@
-# Using multistage build:
-# https://docs.docker.com/develop/develop-images/multistage-build/
-# https://whitfin.io/speeding-up-rust-docker-builds/
-####################### VAULT BUILD IMAGE #######################
-FROM node:9-alpine as vault
-
-ENV VAULT_VERSION "1.27.0"
-ENV URL "https://github.com/bitwarden/web/archive/v${VAULT_VERSION}.tar.gz"
-
-RUN apk add --update-cache --upgrade \
-    curl \
-    git \
-    tar \
-    && npm install -g \
-    gulp-cli \
-    gulp
-
-RUN mkdir /web-build \
-    && cd /web-build \
-    && curl -L "${URL}" | tar -xvz --strip-components=1
-
-WORKDIR /web-build
-
-COPY /docker/settings.Production.json /web-build/
-
-RUN git config --global url."https://github.com/".insteadOf ssh://git@github.com/ \
-    && npm install \
-    && gulp dist:selfHosted \
-    && mv dist /web-vault
-
-########################## BUILD IMAGE ##########################
-# We need to use the Rust build image, because
-# we need the Rust compiler and Cargo tooling
-FROM rust as build
-
-# Using bundled SQLite, no need to install it
-# RUN apt-get update && apt-get install -y\
-#    sqlite3\
-#    --no-install-recommends\
-#    && rm -rf /var/lib/apt/lists/*
-
-# Creates a dummy project used to grab dependencies
-RUN USER=root cargo new --bin app
-WORKDIR /app
-
-# Copies over *only* your manifests and vendored dependencies
-COPY ./Cargo.* ./
-COPY ./libs ./libs
-COPY ./rust-toolchain ./rust-toolchain
-
-# Builds your dependencies and removes the
-# dummy project, except the target folder
-# This folder contains the compiled dependencies
-RUN cargo build --release
-RUN find . -not -path "./target*" -delete
-
-# Copies the complete project
-# To avoid copying unneeded files, use .dockerignore
-COPY . .
-
-# Builds again, this time it'll just be
-# your actual source files being built
-RUN cargo build --release
-
-######################## RUNTIME IMAGE ########################
-# Create a new stage with a minimal image
-# because we already have a binary built
-FROM debian:stretch-slim
-
-ENV ROCKET_ENV "staging"
-
-# Install needed libraries
-RUN apt-get update && apt-get install -y\
-    openssl\
-    ca-certificates\
-    --no-install-recommends\
-    && rm -rf /var/lib/apt/lists/*
-
-RUN mkdir /data
-VOLUME /data
-EXPOSE 80
-
-# Copies the files from the context (env file and web-vault)
-# and the binary from the "build" stage to the current stage
-COPY .env .
-COPY Rocket.toml .
-COPY --from=vault /web-vault ./web-vault
-COPY --from=build app/target/release/bitwarden_rs .
-
-# Configures the startup!
-CMD ./bitwarden_rs
Dockerfile (new symbolic link, 1 line)
@@ -0,0 +1 @@
+docker/amd64/sqlite/Dockerfile
README.md (301 lines changed)
@@ -1,36 +1,21 @@
-This is Bitwarden server API implementation written in rust compatible with [upstream Bitwarden clients](https://bitwarden.com/#download)*, ideal for self-hosted deployment where running official resource-heavy service might not be ideal.
+### This is a Bitwarden server API implementation written in Rust compatible with [upstream Bitwarden clients](https://bitwarden.com/#download)*, perfect for self-hosted deployment where running the official resource-heavy service might not be ideal.
 
+---
+
+[](https://travis-ci.org/dani-garcia/bitwarden_rs)
+[](https://hub.docker.com/r/bitwardenrs/server)
+[](https://deps.rs/repo/github/dani-garcia/bitwarden_rs)
+[](https://github.com/dani-garcia/bitwarden_rs/releases/latest)
+[](https://github.com/dani-garcia/bitwarden_rs/blob/master/LICENSE.txt)
+[](https://matrix.to/#/#bitwarden_rs:matrix.org)
+
 Image is based on [Rust implementation of Bitwarden API](https://github.com/dani-garcia/bitwarden_rs).
 
-_*Note, that this project is not associated with the [Bitwarden](https://bitwarden.com/) project nor 8bit Solutions LLC._
+**This project is not associated with the [Bitwarden](https://bitwarden.com/) project nor 8bit Solutions LLC.**
 
-## Table of contents <!-- omit in toc -->
-- [Features](#features)
-- [Docker image usage](#docker-image-usage)
-  - [Starting a container](#starting-a-container)
-  - [Updating the bitwarden image](#updating-the-bitwarden-image)
-- [Configuring bitwarden service](#configuring-bitwarden-service)
-  - [Disable registration of new users](#disable-registration-of-new-users)
-  - [Enabling HTTPS](#enabling-https)
-  - [Enabling U2F authentication](#enabling-u2f-authentication)
-  - [Changing persistent data location](#changing-persistent-data-location)
-    - [/data prefix:](#data-prefix)
-    - [database name and location](#database-name-and-location)
-    - [attachments location](#attachments-location)
-    - [icons cache](#icons-cache)
-  - [Changing the API request size limit](#changing-the-api-request-size-limit)
-  - [Other configuration](#other-configuration)
-- [Building your own image](#building-your-own-image)
-- [Building binary](#building-binary)
-- [Available packages](#available-packages)
-  - [Arch Linux](#arch-linux)
-- [Backing up your vault](#backing-up-your-vault)
-  - [1. the sqlite3 database](#1-the-sqlite3-database)
-  - [2. the attachments folder](#2-the-attachments-folder)
-  - [3. the key files](#3-the-key-files)
-  - [4. Icon Cache](#4-icon-cache)
-- [Running the server with non-root user](#running-the-server-with-non-root-user)
-- [Get in touch](#get-in-touch)
+#### ⚠️**IMPORTANT**⚠️: When using this server, please report any Bitwarden related bug-reports or suggestions [here](https://github.com/dani-garcia/bitwarden_rs/issues/new), regardless of whatever clients you are using (mobile, desktop, browser...). DO NOT use the official support channels.
+
+---
 
 ## Features
 
@@ -43,262 +28,28 @@ Basically full implementation of Bitwarden API is provided including:
 * Serving the static files for Vault interface
 * Website icons API
 * Authenticator and U2F support
+* YubiKey OTP
 
-## Missing features
-* Email confirmation
-* Other two-factor systems:
-  * YubiKey OTP (if your key supports U2F, you can use that)
-  * Duo
-  * Email codes
-
-## Docker image usage
-
-### Starting a container
-
-The persistent data is stored under /data inside the container, so the only requirement for persistent deployment using Docker is to mount persistent volume at the path:
-
-```
-docker run -d --name bitwarden -v /bw-data/:/data/ -p 80:80 mprasil/bitwarden:latest
-```
-
-This will preserve any persistent data under `/bw-data/`, you can adapt the path to whatever suits you.
-
-The service will be exposed on port 80.
-
-### Updating the bitwarden image
-
-Updating is straightforward, you just make sure to preserve the mounted volume. If you used the bind-mounted path as in the example above, you just need to `pull` the latest image, `stop` and `rm` the current container and then start a new one the same way as before:
-
-```sh
-# Pull the latest version
-docker pull mprasil/bitwarden:latest
-
-# Stop and remove the old container
-docker stop bitwarden
-docker rm bitwarden
-
-# Start new container with the data mounted
-docker run -d --name bitwarden -v /bw-data/:/data/ -p 80:80 mprasil/bitwarden:latest
-```
-Then visit [http://localhost:80](http://localhost:80)
-
-In case you didn't bind mount the volume for persistent data, you need an intermediate step where you preserve the data with an intermediate container:
-
-```sh
-# Pull the latest version
-docker pull mprasil/bitwarden:latest
-
-# Create intermediate container to preserve data
-docker run --volumes-from bitwarden --name bitwarden_data busybox true
-
-# Stop and remove the old container
-docker stop bitwarden
-docker rm bitwarden
-
-# Start new container with the data mounted
-docker run -d --volumes-from bitwarden_data --name bitwarden -p 80:80 mprasil/bitwarden:latest
-
-# Optionally remove the intermediate container
-docker rm bitwarden_data
-
-# Alternatively you can keep data container around for future updates in which case you can skip last step.
-```
-
-## Configuring bitwarden service
-
-### Disable registration of new users
-
-By default new users can register, if you want to disable that, set the `SIGNUPS_ALLOWED` env variable to `false`:
-
-```sh
-docker run -d --name bitwarden \
-  -e SIGNUPS_ALLOWED=false \
-  -v /bw-data/:/data/ \
-  -p 80:80 \
-  mprasil/bitwarden:latest
-```
-
-### Enabling HTTPS
-To enable HTTPS, you need to configure `ROCKET_TLS`.
-
-The value of the option must follow the format:
-```
-ROCKET_TLS={certs="/path/to/certs.pem",key="/path/to/key.pem"}
-```
-Where:
-- certs: a path to a certificate chain in PEM format
-- key: a path to a private key file in PEM format for the certificate in certs
-
-```sh
-docker run -d --name bitwarden \
-  -e ROCKET_TLS='{certs="/ssl/certs.pem",key="/ssl/key.pem"}' \
-  -v /ssl/keys/:/ssl/ \
-  -v /bw-data/:/data/ \
-  -v /icon_cache/ \
-  -p 443:443 \
-  mprasil/bitwarden:latest
-```
-Note that you need to mount the ssl files and forward the appropriate port.
-
-### Enabling U2F authentication
-To enable U2F authentication, you must be serving bitwarden_rs from an HTTPS domain with a valid certificate (either using the included HTTPS options or with a reverse proxy). We recommend using a free certificate from Let's Encrypt.
-
-After that, you need to set the `DOMAIN` environment variable to the same address from where bitwarden_rs is being served:
-
-```sh
-docker run -d --name bitwarden \
-  -e DOMAIN=https://bw.domain.tld \
-  -v /bw-data/:/data/ \
-  -p 80:80 \
-  mprasil/bitwarden:latest
-```
-
-Note that the value has to include the `https://` and it may include a port at the end (in the format of `https://bw.domain.tld:port`) when not using `443`.
-
-### Changing persistent data location
-
-#### /data prefix:
-
-By default all persistent data is saved under `/data`, you can override this path by setting the `DATA_FOLDER` env variable:
-
-```sh
-docker run -d --name bitwarden \
-  -e DATA_FOLDER=/persistent \
-  -v /bw-data/:/persistent/ \
-  -p 80:80 \
-  mprasil/bitwarden:latest
-```
-
-Notice that you need to adapt your volume mount accordingly.
-
-#### database name and location
-
-Default is `$DATA_FOLDER/db.sqlite3`, you can change the path specifically for the database using the `DATABASE_URL` variable:
-
-```sh
-docker run -d --name bitwarden \
-  -e DATABASE_URL=/database/bitwarden.sqlite3 \
-  -v /bw-data/:/data/ \
-  -v /bw-database/:/database/ \
-  -p 80:80 \
-  mprasil/bitwarden:latest
-```
-
-Note that you need to remember to mount the volume for both the database and the other persistent data if they are different.
-
-#### attachments location
-
-Default is `$DATA_FOLDER/attachments`, you can change the path using the `ATTACHMENTS_FOLDER` variable:
-
-```sh
-docker run -d --name bitwarden \
-  -e ATTACHMENTS_FOLDER=/attachments \
-  -v /bw-data/:/data/ \
-  -v /bw-attachments/:/attachments/ \
-  -p 80:80 \
-  mprasil/bitwarden:latest
-```
-
-Note that you need to remember to mount the volume for both the attachments and the other persistent data if they are different.
-
-#### icons cache
-
-Default is `$DATA_FOLDER/icon_cache`, you can change the path using the `ICON_CACHE_FOLDER` variable:
-
-```sh
-docker run -d --name bitwarden \
-  -e ICON_CACHE_FOLDER=/icon_cache \
-  -v /bw-data/:/data/ \
-  -v /icon_cache/ \
-  -p 80:80 \
-  mprasil/bitwarden:latest
-```
-
-Note that in the above example we don't mount the volume locally, which means it won't be persisted during the upgrade unless you use an intermediate data container using `--volumes-from`. This will impact performance as bitwarden will have to re-download the icons on restart, but might save you from having stale icons in cache as they are not automatically cleaned.
-
-### Changing the API request size limit
-
-By default the API calls are limited to 10MB. This should be sufficient for most cases, however if you want to support large imports, this might be limiting you. On the other hand you might want to limit the request size to something smaller than that to prevent API abuse and possible DOS attacks, especially if running with limited resources.
-
-To set the limit, you can use the `ROCKET_LIMITS` variable. The example here shows a 10MB limit for posted json in the body (this is the default):
-
-```sh
-docker run -d --name bitwarden \
-  -e ROCKET_LIMITS={json=10485760} \
-  -v /bw-data/:/data/ \
-  -p 80:80 \
-  mprasil/bitwarden:latest
-```
-
-### Other configuration
-
-Though this is unlikely to be required in a small deployment, you can fine-tune some other settings, like the number of workers, using environment variables that are processed by [Rocket](https://rocket.rs); please see details in the [documentation](https://rocket.rs/guide/configuration/#environment-variables).
-
-## Building your own image
-
-Clone the repository, then from the root of the repository run:
-
-```sh
-# Build the docker image:
-docker build -t bitwarden_rs .
-```
-
-## Building binary
-
-For building the binary outside the Docker environment and running it locally without docker, please see the [build instructions](BUILD.md).
-
-## Available packages
-
-### Arch Linux
-
-Bitwarden_rs is already packaged for Arch Linux thanks to @mqus. There is an AUR package [with](https://aur.archlinux.org/packages/bitwarden_rs-vault-git/) and
-[without](https://aur.archlinux.org/packages/bitwarden_rs-git/) the vault web interface available.
-
-## Backing up your vault
-
-### 1. the sqlite3 database
-
-The sqlite3 database should be backed up using the proper sqlite3 backup command. This will ensure the database does not become corrupted if the backup happens during a database write.
-
-```
-sqlite3 /$DATA_FOLDER/db.sqlite3 ".backup '/$DATA_FOLDER/db-backup/backup.sq3'"
-```
-
-This command can be run via a CRON job every day, however note that it will overwrite the same backup.sq3 file each time. This backup file should therefore be saved via incremental backup, either using a CRON job command that appends a timestamp or from another backup app such as Duplicati.
-
-### 2. the attachments folder
-
-By default, this is located in `$DATA_FOLDER/attachments`
-
-### 3. the key files
-
-This is optional, these are only used to store tokens of users currently logged in; deleting them would simply log each user out, forcing them to log in again. By default, these are located in the `$DATA_FOLDER` (by default /data in the docker). There are 3 files: rsa_key.der, rsa_key.pem, rsa_key.pub.der.
-
-### 4. Icon Cache
-
-This is optional, the icon cache can re-download itself, however if you have a large cache, it may take a long time. By default it is located in `$DATA_FOLDER/icon_cache`
-
-## Running the server with non-root user
-
-The root user inside the container is already pretty limited in what it can do, so the default setup should be secure enough. However if you wish to go the extra mile to avoid using root even in the container, here's how you can do that:
-
-1. Create a data folder that's owned by the non-root user, so you can use that user to write persistent data. Get the user `id`. In Linux you can run `stat <folder_name>` to get/verify the owner ID.
-2. When you run the container, you need to provide the user ID as one of the parameters. Note that this needs to be in numeric form and not the user name, because docker would try to find such a user defined inside the image, which would likely not be there, or it would have a different ID than your local user and hence wouldn't be able to write the persistent data. This can be done with the `--user` parameter.
-3. bitwarden_rs listens on port `80` inside the container by default; this [won't work with a non-root user](https://www.w3.org/Daemon/User/Installation/PrivilegedPorts.html), because regular users aren't allowed to open ports below `1024`. To overcome this, you need to configure the server to listen on a different port; you can use `ROCKET_PORT` to do that.
-
-Here's a sample docker run that uses user id `1000`, with port redirection configured so that inside the container the service listens on port `8080` and docker translates that to the external (host) port `80`:
-
-```sh
-docker run -d --name bitwarden \
-  --user 1000 \
-  -e ROCKET_PORT=8080 \
-  -v /bw-data/:/data/ \
-  -p 80:8080 \
-  mprasil/bitwarden:latest
-```
+## Installation
+Pull the docker image and mount a volume from the host for persistent storage:
+
+```sh
+docker pull bitwardenrs/server:latest
+docker run -d --name bitwarden -v /bw-data/:/data/ -p 80:80 bitwardenrs/server:latest
+```
+This will preserve any persistent data under /bw-data/, you can adapt the path to whatever suits you.
+
+**IMPORTANT**: Some web browsers, like Chrome, disallow the use of Web Crypto APIs in insecure contexts. In this case, you might get an error like `Cannot read property 'importKey'`. To solve this problem, you need to access the web vault from HTTPS.
+
+This can be configured in [bitwarden_rs directly](https://github.com/dani-garcia/bitwarden_rs/wiki/Enabling-HTTPS) or using a third-party reverse proxy ([some examples](https://github.com/dani-garcia/bitwarden_rs/wiki/Proxy-examples)).
+
+If you have an available domain name, you can get HTTPS certificates with [Let's Encrypt](https://letsencrypt.org/), or you can generate self-signed certificates with utilities like [mkcert](https://github.com/FiloSottile/mkcert). Some proxies automatically do this step, like Caddy (see examples linked above).
+
+## Usage
+See the [bitwarden_rs wiki](https://github.com/dani-garcia/bitwarden_rs/wiki) for more information on how to configure and run the bitwarden_rs server.
 
 ## Get in touch
 
 To ask a question, [raising an issue](https://github.com/dani-garcia/bitwarden_rs/issues/new) is fine; please also report any bugs spotted here.
 
-If you prefer to chat, we're usually hanging around at [#bitwarden_rs:matrix.org](https://matrix.to/#/!cASGtOHlSftdScFNMs:matrix.org) room on Matrix. Feel free to join us!
+If you prefer to chat, we're usually hanging around at [#bitwarden_rs:matrix.org](https://matrix.to/#/#bitwarden_rs:matrix.org) room on Matrix. Feel free to join us!
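The removed backup section notes that the single-file `.backup` overwrites itself and suggests a timestamped CRON variant without spelling one out. A minimal sketch, assuming the database sits at the default `/data/db.sqlite3` and a writable `/backups` directory exists (both paths illustrative):

```sh
# crontab entry: daily 04:00 backup with a date-stamped filename.
# '%' is special in crontab lines and must be escaped as '\%'.
0 4 * * * sqlite3 /data/db.sqlite3 ".backup '/backups/db-$(date +\%F).sq3'"
```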
azure-pipelines.yml (new file, 25 lines)
@@ -0,0 +1,25 @@
+pool:
+  vmImage: 'Ubuntu-16.04'
+
+steps:
+- script: |
+    ls -la
+    curl https://sh.rustup.rs -sSf | sh -s -- -y --default-toolchain $(cat rust-toolchain)
+    echo "##vso[task.prependpath]$HOME/.cargo/bin"
+  displayName: 'Install Rust'
+
+- script: |
+    sudo apt-get update
+    sudo apt-get install -y libmysql++-dev
+  displayName: Install libmysql
+
+- script: |
+    rustc -Vv
+    cargo -V
+  displayName: Query rust and cargo versions
+
+- script : cargo build --features "sqlite"
+  displayName: 'Build project with sqlite backend'
+
+- script : cargo build --features "mysql"
+  displayName: 'Build project with mysql backend'
build.rs (new file, 63 lines)

```rust
use std::process::Command;

fn main() {
    #[cfg(all(feature = "sqlite", feature = "mysql"))]
    compile_error!("Can't enable both backends");

    #[cfg(not(any(feature = "sqlite", feature = "mysql")))]
    compile_error!("You need to enable one DB backend. To build with previous defaults do: cargo build --features sqlite");

    read_git_info().ok();
}

fn run(args: &[&str]) -> Result<String, std::io::Error> {
    let out = Command::new(args[0]).args(&args[1..]).output()?;
    if !out.status.success() {
        use std::io::{Error, ErrorKind};
        return Err(Error::new(ErrorKind::Other, "Command not successful"));
    }
    Ok(String::from_utf8(out.stdout).unwrap().trim().to_string())
}

/// This method reads info from Git, namely tags, branch, and revision
fn read_git_info() -> Result<(), std::io::Error> {
    // The exact tag for the current commit, can be empty when
    // the current commit doesn't have an associated tag
    let exact_tag = run(&["git", "describe", "--abbrev=0", "--tags", "--exact-match"]).ok();
    if let Some(ref exact) = exact_tag {
        println!("cargo:rustc-env=GIT_EXACT_TAG={}", exact);
    }

    // The last available tag, equal to exact_tag when
    // the current commit is tagged
    let last_tag = run(&["git", "describe", "--abbrev=0", "--tags"])?;
    println!("cargo:rustc-env=GIT_LAST_TAG={}", last_tag);

    // The current branch name
    let branch = run(&["git", "rev-parse", "--abbrev-ref", "HEAD"])?;
    println!("cargo:rustc-env=GIT_BRANCH={}", branch);

    // The current git commit hash
    let rev = run(&["git", "rev-parse", "HEAD"])?;
    let rev_short = rev.get(..8).unwrap_or_default();
    println!("cargo:rustc-env=GIT_REV={}", rev_short);

    // Combined version
    let version = if let Some(exact) = exact_tag {
        exact
    } else if &branch != "master" {
        format!("{}-{} ({})", last_tag, rev_short, branch)
    } else {
        format!("{}-{}", last_tag, rev_short)
    };
    println!("cargo:rustc-env=GIT_VERSION={}", version);

    // To access these values, use:
    //    env!("GIT_EXACT_TAG")
    //    env!("GIT_LAST_TAG")
    //    env!("GIT_BRANCH")
    //    env!("GIT_REV")
    //    env!("GIT_VERSION")

    Ok(())
}
```
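The two `compile_error!` guards in `build.rs` make the database backend an explicit choice: compilation now aborts unless exactly one of the `sqlite` or `mysql` features is enabled. A minimal sketch of the resulting invocations, mirroring the CI steps in `azure-pipelines.yml` above:

```sh
# Exactly one backend feature must be selected, or build.rs stops the build.
cargo build --features sqlite   # matches the previous default behaviour
cargo build --features mysql    # opts into the MySQL backend instead
```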
docker/aarch64/mysql/Dockerfile (new file, 101 lines)

```dockerfile
# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
FROM alpine:3.10 as vault

ENV VAULT_VERSION "v2.11.0"

ENV URL "https://github.com/dani-garcia/bw_web_builds/releases/download/$VAULT_VERSION/bw_web_$VAULT_VERSION.tar.gz"

RUN apk add --no-cache --upgrade \
    curl \
    tar

RUN mkdir /web-vault
WORKDIR /web-vault

SHELL ["/bin/ash", "-eo", "pipefail", "-c"]

RUN curl -L $URL | tar xz
RUN ls

########################## BUILD IMAGE ##########################
# We need to use the Rust build image, because
# we need the Rust compiler and Cargo tooling
FROM rust:1.36 as build

# set mysql backend
ARG DB=mysql

RUN apt-get update \
    && apt-get install -y \
        --no-install-recommends \
        gcc-aarch64-linux-gnu \
    && mkdir -p ~/.cargo \
    && echo '[target.aarch64-unknown-linux-gnu]' >> ~/.cargo/config \
    && echo 'linker = "aarch64-linux-gnu-gcc"' >> ~/.cargo/config

ENV CARGO_HOME "/root/.cargo"
ENV USER "root"

WORKDIR /app

# Prepare openssl arm64 libs
RUN sed 's/^deb/deb-src/' /etc/apt/sources.list > \
        /etc/apt/sources.list.d/deb-src.list \
    && dpkg --add-architecture arm64 \
    && apt-get update \
    && apt-get install -y \
        --no-install-recommends \
        libssl-dev:arm64 \
        libc6-dev:arm64 \
        libmariadb-dev:arm64

ENV CC_aarch64_unknown_linux_gnu="/usr/bin/aarch64-linux-gnu-gcc"
ENV CROSS_COMPILE="1"
ENV OPENSSL_INCLUDE_DIR="/usr/include/aarch64-linux-gnu"
ENV OPENSSL_LIB_DIR="/usr/lib/aarch64-linux-gnu"

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .

# Build
RUN rustup target add aarch64-unknown-linux-gnu
RUN cargo build --features ${DB} --release --target=aarch64-unknown-linux-gnu -v

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM balenalib/aarch64-debian:stretch

ENV ROCKET_ENV "staging"
ENV ROCKET_PORT=80
ENV ROCKET_WORKERS=10

RUN [ "cross-build-start" ]

# Install needed libraries
RUN apt-get update && apt-get install -y \
    --no-install-recommends \
    openssl \
    ca-certificates \
    libmariadbclient-dev \
    && rm -rf /var/lib/apt/lists/*

RUN mkdir /data

RUN [ "cross-build-end" ]

VOLUME /data
EXPOSE 80

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
COPY Rocket.toml .
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/aarch64-unknown-linux-gnu/release/bitwarden_rs .

# Configures the startup!
CMD ["./bitwarden_rs"]
```
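Each of these per-architecture Dockerfiles is selected purely by path. As a sketch of how one of them would be built from the repository root (the image tag here is just an illustrative choice, not an official one):

```sh
# Run from the repository root so the whole build context
# (Cargo.*, src/, build.rs, rust-toolchain, Rocket.toml) is available.
docker build -t bitwarden_rs:aarch64-mysql -f docker/aarch64/mysql/Dockerfile .
```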
docker/aarch64/sqlite/Dockerfile (new file, 101 lines)

```dockerfile
# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
FROM alpine:3.10 as vault

ENV VAULT_VERSION "v2.11.0"

ENV URL "https://github.com/dani-garcia/bw_web_builds/releases/download/$VAULT_VERSION/bw_web_$VAULT_VERSION.tar.gz"

RUN apk add --no-cache --upgrade \
    curl \
    tar

RUN mkdir /web-vault
WORKDIR /web-vault

SHELL ["/bin/ash", "-eo", "pipefail", "-c"]

RUN curl -L $URL | tar xz
RUN ls

########################## BUILD IMAGE ##########################
# We need to use the Rust build image, because
# we need the Rust compiler and Cargo tooling
FROM rust:1.36 as build

# set sqlite as default for DB ARG for backward compatibility
ARG DB=sqlite

RUN apt-get update \
    && apt-get install -y \
        --no-install-recommends \
        gcc-aarch64-linux-gnu \
    && mkdir -p ~/.cargo \
    && echo '[target.aarch64-unknown-linux-gnu]' >> ~/.cargo/config \
    && echo 'linker = "aarch64-linux-gnu-gcc"' >> ~/.cargo/config

ENV CARGO_HOME "/root/.cargo"
ENV USER "root"

WORKDIR /app

# Prepare openssl arm64 libs
RUN sed 's/^deb/deb-src/' /etc/apt/sources.list > \
        /etc/apt/sources.list.d/deb-src.list \
    && dpkg --add-architecture arm64 \
    && apt-get update \
    && apt-get install -y \
        --no-install-recommends \
        libssl-dev:arm64 \
        libc6-dev:arm64 \
        libmariadb-dev:arm64

ENV CC_aarch64_unknown_linux_gnu="/usr/bin/aarch64-linux-gnu-gcc"
ENV CROSS_COMPILE="1"
ENV OPENSSL_INCLUDE_DIR="/usr/include/aarch64-linux-gnu"
ENV OPENSSL_LIB_DIR="/usr/lib/aarch64-linux-gnu"

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .

# Build
RUN rustup target add aarch64-unknown-linux-gnu
RUN cargo build --features ${DB} --release --target=aarch64-unknown-linux-gnu -v

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM balenalib/aarch64-debian:stretch

ENV ROCKET_ENV "staging"
ENV ROCKET_PORT=80
ENV ROCKET_WORKERS=10

RUN [ "cross-build-start" ]

# Install needed libraries
RUN apt-get update && apt-get install -y \
    --no-install-recommends \
    openssl \
    ca-certificates \
    libmariadbclient-dev \
    && rm -rf /var/lib/apt/lists/*

RUN mkdir /data

RUN [ "cross-build-end" ]

VOLUME /data
EXPOSE 80

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
COPY Rocket.toml .
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/aarch64-unknown-linux-gnu/release/bitwarden_rs .

# Configures the startup!
CMD ["./bitwarden_rs"]
```
docker/amd64/mysql/Dockerfile (new file, 98 lines)

```dockerfile
# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
FROM alpine:3.10 as vault

ENV VAULT_VERSION "v2.11.0"

ENV URL "https://github.com/dani-garcia/bw_web_builds/releases/download/$VAULT_VERSION/bw_web_$VAULT_VERSION.tar.gz"

RUN apk add --no-cache --upgrade \
    curl \
    tar

RUN mkdir /web-vault
WORKDIR /web-vault

SHELL ["/bin/ash", "-eo", "pipefail", "-c"]

RUN curl -L $URL | tar xz
RUN ls

########################## BUILD IMAGE ##########################
# We need to use the Rust build image, because
# we need the Rust compiler and Cargo tooling
FROM rust:1.36 as build

# set mysql backend
ARG DB=mysql

# Using bundled SQLite, no need to install it
# RUN apt-get update && apt-get install -y \
#     --no-install-recommends \
#     sqlite3 \
#     && rm -rf /var/lib/apt/lists/*

# Install MySQL package
RUN apt-get update && apt-get install -y \
    --no-install-recommends \
    libmariadb-dev \
    && rm -rf /var/lib/apt/lists/*

# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin app
WORKDIR /app

# Copies over *only* your manifests and build files
COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs

# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN cargo build --features ${DB} --release
RUN find . -not -path "./target*" -delete

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .

# Make sure that we actually build the project
RUN touch src/main.rs

# Builds again, this time it'll just be
# your actual source files being built
RUN cargo build --features ${DB} --release

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM debian:stretch-slim

ENV ROCKET_ENV "staging"
ENV ROCKET_PORT=80
ENV ROCKET_WORKERS=10

# Install needed libraries
RUN apt-get update && apt-get install -y \
    --no-install-recommends \
    openssl \
    ca-certificates \
    libmariadbclient-dev \
    && rm -rf /var/lib/apt/lists/*

RUN mkdir /data
VOLUME /data
EXPOSE 80
EXPOSE 3012

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
COPY Rocket.toml .
COPY --from=vault /web-vault ./web-vault
COPY --from=build app/target/release/bitwarden_rs .

# Configures the startup!
CMD ["./bitwarden_rs"]
```
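The dummy-project step above exists for layer caching: dependencies compile in their own layer, so editing application sources only repeats the final `cargo build`. A hedged illustration of the effect (the tag is hypothetical):

```sh
docker build -t bitwarden_rs:amd64-mysql -f docker/amd64/mysql/Dockerfile .
# Touch only application code and rebuild: the dependency layer is reused
# from cache, and just the last `cargo build` layer runs again.
touch src/main.rs
docker build -t bitwarden_rs:amd64-mysql -f docker/amd64/mysql/Dockerfile .
```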
docker/amd64/mysql/Dockerfile.alpine (new file, 80 lines)

```dockerfile
# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
FROM alpine:3.10 as vault

ENV VAULT_VERSION "v2.11.0"

ENV URL "https://github.com/dani-garcia/bw_web_builds/releases/download/$VAULT_VERSION/bw_web_$VAULT_VERSION.tar.gz"

RUN apk add --no-cache --upgrade \
    curl \
    tar

RUN mkdir /web-vault
WORKDIR /web-vault

SHELL ["/bin/ash", "-eo", "pipefail", "-c"]

RUN curl -L $URL | tar xz
RUN ls

########################## BUILD IMAGE ##########################
# Musl build image for statically compiled binary
FROM clux/muslrust:nightly-2019-07-08 as build

# set mysql backend
ARG DB=mysql

ENV USER "root"

# Install needed libraries
RUN apt-get update && apt-get install -y \
    --no-install-recommends \
    libmysqlclient-dev \
    && rm -rf /var/lib/apt/lists/*

WORKDIR /app

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .

RUN rustup target add x86_64-unknown-linux-musl

# Make sure that we actually build the project
RUN touch src/main.rs

# Build
RUN cargo build --features ${DB} --release

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM alpine:3.10

ENV ROCKET_ENV "staging"
ENV ROCKET_PORT=80
ENV ROCKET_WORKERS=10
ENV SSL_CERT_DIR=/etc/ssl/certs

# Install needed libraries
RUN apk add --no-cache \
    openssl \
    mariadb-connector-c \
    ca-certificates

RUN mkdir /data
VOLUME /data
EXPOSE 80
EXPOSE 3012

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
COPY Rocket.toml .
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/x86_64-unknown-linux-musl/release/bitwarden_rs .

# Configures the startup!
CMD ["./bitwarden_rs"]
```
docker/amd64/sqlite/Dockerfile (new file, 98 lines)

```dockerfile
# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
FROM alpine:3.10 as vault

ENV VAULT_VERSION "v2.11.0"

ENV URL "https://github.com/dani-garcia/bw_web_builds/releases/download/$VAULT_VERSION/bw_web_$VAULT_VERSION.tar.gz"

RUN apk add --no-cache --upgrade \
    curl \
    tar

RUN mkdir /web-vault
WORKDIR /web-vault

SHELL ["/bin/ash", "-eo", "pipefail", "-c"]

RUN curl -L $URL | tar xz
RUN ls

########################## BUILD IMAGE ##########################
# We need to use the Rust build image, because
# we need the Rust compiler and Cargo tooling
FROM rust:1.36 as build

# set sqlite as default for DB ARG for backward compatibility
ARG DB=sqlite

# Using bundled SQLite, no need to install it
# RUN apt-get update && apt-get install -y \
#     --no-install-recommends \
#     sqlite3 \
#     && rm -rf /var/lib/apt/lists/*

# Install MySQL package
RUN apt-get update && apt-get install -y \
    --no-install-recommends \
    libmariadb-dev \
    && rm -rf /var/lib/apt/lists/*

# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin app
WORKDIR /app

# Copies over *only* your manifests and build files
COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs

# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN cargo build --features ${DB} --release
RUN find . -not -path "./target*" -delete

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .

# Make sure that we actually build the project
RUN touch src/main.rs

# Builds again, this time it'll just be
# your actual source files being built
RUN cargo build --features ${DB} --release

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM debian:stretch-slim

ENV ROCKET_ENV "staging"
ENV ROCKET_PORT=80
ENV ROCKET_WORKERS=10

# Install needed libraries
RUN apt-get update && apt-get install -y \
    --no-install-recommends \
    openssl \
    ca-certificates \
    libmariadbclient-dev \
    && rm -rf /var/lib/apt/lists/*

RUN mkdir /data
VOLUME /data
EXPOSE 80
EXPOSE 3012

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
COPY Rocket.toml .
COPY --from=vault /web-vault ./web-vault
COPY --from=build app/target/release/bitwarden_rs .

# Configures the startup!
CMD ["./bitwarden_rs"]
```
docker/amd64/sqlite/Dockerfile.alpine (new file, 80 lines)

```dockerfile
# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
FROM alpine:3.10 as vault

ENV VAULT_VERSION "v2.11.0"

ENV URL "https://github.com/dani-garcia/bw_web_builds/releases/download/$VAULT_VERSION/bw_web_$VAULT_VERSION.tar.gz"

RUN apk add --no-cache --upgrade \
    curl \
    tar

RUN mkdir /web-vault
WORKDIR /web-vault

SHELL ["/bin/ash", "-eo", "pipefail", "-c"]

RUN curl -L $URL | tar xz
RUN ls

########################## BUILD IMAGE ##########################
# Musl build image for statically compiled binary
FROM clux/muslrust:nightly-2019-07-08 as build

# set sqlite as default for DB ARG for backward compatibility
ARG DB=sqlite

ENV USER "root"

# Install needed libraries
RUN apt-get update && apt-get install -y \
    --no-install-recommends \
    libmysqlclient-dev \
    && rm -rf /var/lib/apt/lists/*

WORKDIR /app

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .

RUN rustup target add x86_64-unknown-linux-musl

# Make sure that we actually build the project
RUN touch src/main.rs

# Build
RUN cargo build --features ${DB} --release

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM alpine:3.10

ENV ROCKET_ENV "staging"
ENV ROCKET_PORT=80
ENV ROCKET_WORKERS=10
ENV SSL_CERT_DIR=/etc/ssl/certs

# Install needed libraries
RUN apk add --no-cache \
    openssl \
    mariadb-connector-c \
    ca-certificates

RUN mkdir /data
VOLUME /data
EXPOSE 80
EXPOSE 3012

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
COPY Rocket.toml .
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/x86_64-unknown-linux-musl/release/bitwarden_rs .

# Configures the startup!
CMD ["./bitwarden_rs"]
```
docker/armv6/mysql/Dockerfile (new file, 101 lines)

```dockerfile
# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
FROM alpine:3.10 as vault

ENV VAULT_VERSION "v2.11.0"

ENV URL "https://github.com/dani-garcia/bw_web_builds/releases/download/$VAULT_VERSION/bw_web_$VAULT_VERSION.tar.gz"

RUN apk add --no-cache --upgrade \
    curl \
    tar

RUN mkdir /web-vault
WORKDIR /web-vault

SHELL ["/bin/ash", "-eo", "pipefail", "-c"]

RUN curl -L $URL | tar xz
RUN ls

########################## BUILD IMAGE ##########################
# We need to use the Rust build image, because
# we need the Rust compiler and Cargo tooling
FROM rust:1.36 as build

# set mysql backend
ARG DB=mysql

RUN apt-get update \
    && apt-get install -y \
        --no-install-recommends \
        gcc-arm-linux-gnueabi \
    && mkdir -p ~/.cargo \
    && echo '[target.arm-unknown-linux-gnueabi]' >> ~/.cargo/config \
    && echo 'linker = "arm-linux-gnueabi-gcc"' >> ~/.cargo/config

ENV CARGO_HOME "/root/.cargo"
ENV USER "root"

WORKDIR /app

# Prepare openssl armel libs
RUN sed 's/^deb/deb-src/' /etc/apt/sources.list > \
        /etc/apt/sources.list.d/deb-src.list \
    && dpkg --add-architecture armel \
    && apt-get update \
    && apt-get install -y \
        --no-install-recommends \
        libssl-dev:armel \
        libc6-dev:armel \
        libmariadb-dev:armel

ENV CC_arm_unknown_linux_gnueabi="/usr/bin/arm-linux-gnueabi-gcc"
ENV CROSS_COMPILE="1"
ENV OPENSSL_INCLUDE_DIR="/usr/include/arm-linux-gnueabi"
ENV OPENSSL_LIB_DIR="/usr/lib/arm-linux-gnueabi"

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .

# Build
RUN rustup target add arm-unknown-linux-gnueabi
RUN cargo build --features ${DB} --release --target=arm-unknown-linux-gnueabi -v

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM balenalib/rpi-debian:stretch

ENV ROCKET_ENV "staging"
ENV ROCKET_PORT=80
ENV ROCKET_WORKERS=10

RUN [ "cross-build-start" ]

# Install needed libraries
RUN apt-get update && apt-get install -y \
    --no-install-recommends \
    openssl \
    ca-certificates \
    libmariadbclient-dev \
    && rm -rf /var/lib/apt/lists/*

RUN mkdir /data

RUN [ "cross-build-end" ]

VOLUME /data
EXPOSE 80

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
COPY Rocket.toml .
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/arm-unknown-linux-gnueabi/release/bitwarden_rs .

# Configures the startup!
CMD ["./bitwarden_rs"]
```
docker/armv6/sqlite/Dockerfile (new file, 101 lines)

```dockerfile
# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
FROM alpine:3.10 as vault

ENV VAULT_VERSION "v2.11.0"

ENV URL "https://github.com/dani-garcia/bw_web_builds/releases/download/$VAULT_VERSION/bw_web_$VAULT_VERSION.tar.gz"

RUN apk add --no-cache --upgrade \
    curl \
    tar

RUN mkdir /web-vault
WORKDIR /web-vault

SHELL ["/bin/ash", "-eo", "pipefail", "-c"]

RUN curl -L $URL | tar xz
RUN ls

########################## BUILD IMAGE ##########################
# We need to use the Rust build image, because
# we need the Rust compiler and Cargo tooling
FROM rust:1.36 as build

# set sqlite as default for DB ARG for backward compatibility
ARG DB=sqlite

RUN apt-get update \
    && apt-get install -y \
        --no-install-recommends \
        gcc-arm-linux-gnueabi \
    && mkdir -p ~/.cargo \
    && echo '[target.arm-unknown-linux-gnueabi]' >> ~/.cargo/config \
    && echo 'linker = "arm-linux-gnueabi-gcc"' >> ~/.cargo/config

ENV CARGO_HOME "/root/.cargo"
ENV USER "root"

WORKDIR /app

# Prepare openssl armel libs
RUN sed 's/^deb/deb-src/' /etc/apt/sources.list > \
        /etc/apt/sources.list.d/deb-src.list \
    && dpkg --add-architecture armel \
    && apt-get update \
    && apt-get install -y \
        --no-install-recommends \
        libssl-dev:armel \
        libc6-dev:armel \
        libmariadb-dev:armel

ENV CC_arm_unknown_linux_gnueabi="/usr/bin/arm-linux-gnueabi-gcc"
ENV CROSS_COMPILE="1"
ENV OPENSSL_INCLUDE_DIR="/usr/include/arm-linux-gnueabi"
ENV OPENSSL_LIB_DIR="/usr/lib/arm-linux-gnueabi"

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .

# Build
RUN rustup target add arm-unknown-linux-gnueabi
RUN cargo build --features ${DB} --release --target=arm-unknown-linux-gnueabi -v

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM balenalib/rpi-debian:stretch

ENV ROCKET_ENV "staging"
ENV ROCKET_PORT=80
ENV ROCKET_WORKERS=10

RUN [ "cross-build-start" ]

# Install needed libraries
RUN apt-get update && apt-get install -y \
    --no-install-recommends \
    openssl \
    ca-certificates \
    libmariadbclient-dev \
    && rm -rf /var/lib/apt/lists/*

RUN mkdir /data

RUN [ "cross-build-end" ]

VOLUME /data
EXPOSE 80

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
COPY Rocket.toml .
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/arm-unknown-linux-gnueabi/release/bitwarden_rs .

# Configures the startup!
CMD ["./bitwarden_rs"]
```
docker/armv7/mysql/Dockerfile (new file, 102 lines)

```dockerfile
# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
FROM alpine:3.10 as vault

ENV VAULT_VERSION "v2.11.0"

ENV URL "https://github.com/dani-garcia/bw_web_builds/releases/download/$VAULT_VERSION/bw_web_$VAULT_VERSION.tar.gz"

RUN apk add --no-cache --upgrade \
    curl \
    tar

RUN mkdir /web-vault
WORKDIR /web-vault

SHELL ["/bin/ash", "-eo", "pipefail", "-c"]

RUN curl -L $URL | tar xz
RUN ls

########################## BUILD IMAGE ##########################
# We need to use the Rust build image, because
# we need the Rust compiler and Cargo tooling
FROM rust:1.36 as build

# set mysql backend
ARG DB=mysql

RUN apt-get update \
    && apt-get install -y \
        --no-install-recommends \
        gcc-arm-linux-gnueabihf \
    && mkdir -p ~/.cargo \
    && echo '[target.armv7-unknown-linux-gnueabihf]' >> ~/.cargo/config \
    && echo 'linker = "arm-linux-gnueabihf-gcc"' >> ~/.cargo/config

ENV CARGO_HOME "/root/.cargo"
ENV USER "root"

WORKDIR /app

# Prepare openssl armhf libs
RUN sed 's/^deb/deb-src/' /etc/apt/sources.list > \
        /etc/apt/sources.list.d/deb-src.list \
    && dpkg --add-architecture armhf \
    && apt-get update \
    && apt-get install -y \
        --no-install-recommends \
        libssl-dev:armhf \
        libc6-dev:armhf \
        libmariadb-dev:armhf

ENV CC_armv7_unknown_linux_gnueabihf="/usr/bin/arm-linux-gnueabihf-gcc"
ENV CROSS_COMPILE="1"
ENV OPENSSL_INCLUDE_DIR="/usr/include/arm-linux-gnueabihf"
ENV OPENSSL_LIB_DIR="/usr/lib/arm-linux-gnueabihf"

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .

# Build
RUN rustup target add armv7-unknown-linux-gnueabihf
RUN cargo build --features ${DB} --release --target=armv7-unknown-linux-gnueabihf -v

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM balenalib/armv7hf-debian:stretch

ENV ROCKET_ENV "staging"
ENV ROCKET_PORT=80
ENV ROCKET_WORKERS=10

RUN [ "cross-build-start" ]

# Install needed libraries
RUN apt-get update && apt-get install -y \
    --no-install-recommends \
    openssl \
    ca-certificates \
    libmariadbclient-dev \
    && rm -rf /var/lib/apt/lists/*

RUN mkdir /data

RUN [ "cross-build-end" ]

VOLUME /data
EXPOSE 80

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
COPY Rocket.toml .
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/armv7-unknown-linux-gnueabihf/release/bitwarden_rs .

# Configures the startup!
CMD ["./bitwarden_rs"]
```
docker/armv7/sqlite/Dockerfile (new file, 101 lines)

```dockerfile
# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
FROM alpine:3.10 as vault

ENV VAULT_VERSION "v2.11.0"

ENV URL "https://github.com/dani-garcia/bw_web_builds/releases/download/$VAULT_VERSION/bw_web_$VAULT_VERSION.tar.gz"

RUN apk add --no-cache --upgrade \
    curl \
    tar

RUN mkdir /web-vault
WORKDIR /web-vault

SHELL ["/bin/ash", "-eo", "pipefail", "-c"]

RUN curl -L $URL | tar xz
RUN ls

########################## BUILD IMAGE ##########################
# We need to use the Rust build image, because
# we need the Rust compiler and Cargo tooling
FROM rust:1.36 as build

# set sqlite as default for DB ARG for backward compatibility
ARG DB=sqlite

RUN apt-get update \
    && apt-get install -y \
        --no-install-recommends \
        gcc-arm-linux-gnueabihf \
    && mkdir -p ~/.cargo \
    && echo '[target.armv7-unknown-linux-gnueabihf]' >> ~/.cargo/config \
    && echo 'linker = "arm-linux-gnueabihf-gcc"' >> ~/.cargo/config

ENV CARGO_HOME "/root/.cargo"
ENV USER "root"

WORKDIR /app

# Prepare openssl armhf libs
RUN sed 's/^deb/deb-src/' /etc/apt/sources.list > \
        /etc/apt/sources.list.d/deb-src.list \
    && dpkg --add-architecture armhf \
    && apt-get update \
    && apt-get install -y \
        --no-install-recommends \
        libssl-dev:armhf \
        libc6-dev:armhf \
        libmariadb-dev:armhf

ENV CC_armv7_unknown_linux_gnueabihf="/usr/bin/arm-linux-gnueabihf-gcc"
ENV CROSS_COMPILE="1"
ENV OPENSSL_INCLUDE_DIR="/usr/include/arm-linux-gnueabihf"
ENV OPENSSL_LIB_DIR="/usr/lib/arm-linux-gnueabihf"

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .

# Build
RUN rustup target add armv7-unknown-linux-gnueabihf
RUN cargo build --features ${DB} --release --target=armv7-unknown-linux-gnueabihf -v

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM balenalib/armv7hf-debian:stretch

ENV ROCKET_ENV "staging"
ENV ROCKET_PORT=80
ENV ROCKET_WORKERS=10

RUN [ "cross-build-start" ]

# Install needed libraries
RUN apt-get update && apt-get install -y \
    --no-install-recommends \
    openssl \
    ca-certificates \
    libmariadbclient-dev \
    && rm -rf /var/lib/apt/lists/*

RUN mkdir /data

RUN [ "cross-build-end" ]

VOLUME /data
EXPOSE 80

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
COPY Rocket.toml .
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/armv7-unknown-linux-gnueabihf/release/bitwarden_rs .

# Configures the startup!
CMD ["./bitwarden_rs"]
```
@@ -1,9 +0,0 @@ (deleted file)

```json
{
  "appSettings": {
    "apiUri": "/api",
    "identityUri": "/identity",
    "iconsUri": "/icons",
    "stripeKey": "",
    "braintreeKey": ""
  }
}
```
@@ -1,20 +0,0 @@ (deleted file)

```toml
[package]
name = "jsonwebtoken"
version = "4.0.1"
authors = ["Vincent Prouillet <prouillet.vincent@gmail.com>"]
license = "MIT"
readme = "README.md"
description = "Create and parse JWT in a strongly typed way."
homepage = "https://github.com/Keats/rust-jwt"
repository = "https://github.com/Keats/rust-jwt"
keywords = ["jwt", "web", "api", "token", "json"]

[dependencies]
error-chain = { version = "0.11", default-features = false }
serde_json = "1.0"
serde_derive = "1.0"
serde = "1.0"
ring = { version = "0.11.0", features = ["rsa_signing", "dev_urandom_fallback"] }
base64 = "0.9"
untrusted = "0.5"
chrono = "0.4"
```
@@ -1,21 +0,0 @@ (deleted file)

```text
The MIT License (MIT)

Copyright (c) 2015 Vincent Prouillet

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
```
@@ -1,120 +0,0 @@ (deleted file)

```rust
use std::sync::Arc;

use base64;
use ring::{rand, digest, hmac, signature};
use ring::constant_time::verify_slices_are_equal;
use untrusted;

use errors::{Result, ErrorKind};


/// The algorithms supported for signing/verifying
#[derive(Debug, PartialEq, Copy, Clone, Serialize, Deserialize)]
pub enum Algorithm {
    /// HMAC using SHA-256
    HS256,
    /// HMAC using SHA-384
    HS384,
    /// HMAC using SHA-512
    HS512,

    /// RSASSA-PKCS1-v1_5 using SHA-256
    RS256,
    /// RSASSA-PKCS1-v1_5 using SHA-384
    RS384,
    /// RSASSA-PKCS1-v1_5 using SHA-512
    RS512,
}

/// The actual HS signing + encoding
fn sign_hmac(alg: &'static digest::Algorithm, key: &[u8], signing_input: &str) -> Result<String> {
    let signing_key = hmac::SigningKey::new(alg, key);
    let digest = hmac::sign(&signing_key, signing_input.as_bytes());

    Ok(
        base64::encode_config::<hmac::Signature>(&digest, base64::URL_SAFE_NO_PAD)
    )
}

/// The actual RSA signing + encoding
/// Taken from Ring doc https://briansmith.org/rustdoc/ring/signature/index.html
fn sign_rsa(alg: Algorithm, key: &[u8], signing_input: &str) -> Result<String> {
    let ring_alg = match alg {
        Algorithm::RS256 => &signature::RSA_PKCS1_SHA256,
        Algorithm::RS384 => &signature::RSA_PKCS1_SHA384,
        Algorithm::RS512 => &signature::RSA_PKCS1_SHA512,
        _ => unreachable!(),
    };

    let key_pair = Arc::new(
        signature::RSAKeyPair::from_der(untrusted::Input::from(key))
            .map_err(|_| ErrorKind::InvalidKey)?
    );
    let mut signing_state = signature::RSASigningState::new(key_pair)
        .map_err(|_| ErrorKind::InvalidKey)?;
    let mut signature = vec![0; signing_state.key_pair().public_modulus_len()];
    let rng = rand::SystemRandom::new();
    signing_state.sign(ring_alg, &rng, signing_input.as_bytes(), &mut signature)
        .map_err(|_| ErrorKind::InvalidKey)?;

    Ok(
        base64::encode_config::<[u8]>(&signature, base64::URL_SAFE_NO_PAD)
    )
}

/// Take the payload of a JWT, sign it using the algorithm given and return
/// the base64 url safe encoding of the result.
///
/// Only use this function if you want to do something other than JWT.
pub fn sign(signing_input: &str, key: &[u8], algorithm: Algorithm) -> Result<String> {
    match algorithm {
        Algorithm::HS256 => sign_hmac(&digest::SHA256, key, signing_input),
        Algorithm::HS384 => sign_hmac(&digest::SHA384, key, signing_input),
        Algorithm::HS512 => sign_hmac(&digest::SHA512, key, signing_input),

        Algorithm::RS256 | Algorithm::RS384 | Algorithm::RS512 => sign_rsa(algorithm, key, signing_input),
        // TODO: if PKCS1 is made public, remove the line above and uncomment below
        // Algorithm::RS256 => sign_rsa(&signature::RSA_PKCS1_SHA256, key, signing_input),
        // Algorithm::RS384 => sign_rsa(&signature::RSA_PKCS1_SHA384, key, signing_input),
        // Algorithm::RS512 => sign_rsa(&signature::RSA_PKCS1_SHA512, key, signing_input),
    }
}

/// See Ring RSA docs for more details
fn verify_rsa(alg: &signature::RSAParameters, signature: &str, signing_input: &str, key: &[u8]) -> Result<bool> {
    let signature_bytes = base64::decode_config(signature, base64::URL_SAFE_NO_PAD)?;
    let public_key_der = untrusted::Input::from(key);
    let message = untrusted::Input::from(signing_input.as_bytes());
    let expected_signature = untrusted::Input::from(signature_bytes.as_slice());

    let res = signature::verify(alg, public_key_der, message, expected_signature);

    Ok(res.is_ok())
}

/// Compares the signature given with a re-computed signature for HMAC or using the public key
/// for RSA.
///
/// Only use this function if you want to do something other than JWT.
///
/// `signature` is the signature part of a jwt (text after the second '.')
///
/// `signing_input` is base64(header) + "." + base64(claims)
pub fn verify(signature: &str, signing_input: &str, key: &[u8], algorithm: Algorithm) -> Result<bool> {
    match algorithm {
        Algorithm::HS256 | Algorithm::HS384 | Algorithm::HS512 => {
            // we just re-sign the data with the key and compare if they are equal
            let signed = sign(signing_input, key, algorithm)?;
            Ok(verify_slices_are_equal(signature.as_ref(), signed.as_ref()).is_ok())
        },
        Algorithm::RS256 => verify_rsa(&signature::RSA_PKCS1_2048_8192_SHA256, signature, signing_input, key),
        Algorithm::RS384 => verify_rsa(&signature::RSA_PKCS1_2048_8192_SHA384, signature, signing_input, key),
        Algorithm::RS512 => verify_rsa(&signature::RSA_PKCS1_2048_8192_SHA512, signature, signing_input, key),
    }
}

impl Default for Algorithm {
    fn default() -> Self {
        Algorithm::HS256
    }
}
```
@@ -1,68 +0,0 @@ (deleted file)

```rust
use base64;
use serde_json;
use ring;

error_chain! {
    errors {
        /// When a token doesn't have a valid JWT shape
        InvalidToken {
            description("invalid token")
            display("Invalid token")
        }
        /// When the signature doesn't match
        InvalidSignature {
            description("invalid signature")
            display("Invalid signature")
        }
        /// When the secret given is not a valid RSA key
        InvalidKey {
            description("invalid key")
            display("Invalid Key")
        }

        // Validation errors

        /// When a token’s `exp` claim indicates that it has expired
        ExpiredSignature {
            description("expired signature")
            display("Expired Signature")
        }
        /// When a token’s `iss` claim does not match the expected issuer
        InvalidIssuer {
            description("invalid issuer")
            display("Invalid Issuer")
        }
        /// When a token’s `aud` claim does not match one of the expected audience values
        InvalidAudience {
            description("invalid audience")
            display("Invalid Audience")
        }
        /// When a token’s `sub` claim does not match the expected subject
        InvalidSubject {
            description("invalid subject")
            display("Invalid Subject")
        }
        /// When a token’s `iat` claim is in the future
        InvalidIssuedAt {
            description("invalid issued at")
            display("Invalid Issued At")
        }
        /// When a token’s `nbf` claim represents a time in the future
        ImmatureSignature {
            description("immature signature")
            display("Immature Signature")
        }
        /// When the algorithm in the header doesn't match the one passed to `decode`
        InvalidAlgorithm {
            description("Invalid algorithm")
            display("Invalid Algorithm")
        }
    }

    foreign_links {
        Unspecified(ring::error::Unspecified) #[doc = "An error happened while signing/verifying a token with RSA"];
        Base64(base64::DecodeError) #[doc = "An error happened while decoding some base64 text"];
        Json(serde_json::Error) #[doc = "An error happened while serializing/deserializing JSON"];
        Utf8(::std::string::FromUtf8Error) #[doc = "An error happened while trying to convert the result of base64 decoding to a String"];
    }
}
```
@@ -1,64 +0,0 @@ (deleted file)

```rust
use crypto::Algorithm;


/// A basic JWT header, the alg defaults to HS256 and typ is automatically
/// set to `JWT`. All the other fields are optional.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct Header {
    /// The type of JWS: it can only be "JWT" here
    ///
    /// Defined in [RFC7515#4.1.9](https://tools.ietf.org/html/rfc7515#section-4.1.9).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub typ: Option<String>,
    /// The algorithm used
    ///
    /// Defined in [RFC7515#4.1.1](https://tools.ietf.org/html/rfc7515#section-4.1.1).
    pub alg: Algorithm,
    /// Content type
    ///
    /// Defined in [RFC7519#5.2](https://tools.ietf.org/html/rfc7519#section-5.2).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub cty: Option<String>,
    /// JSON Key URL
    ///
    /// Defined in [RFC7515#4.1.2](https://tools.ietf.org/html/rfc7515#section-4.1.2).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub jku: Option<String>,
    /// Key ID
    ///
    /// Defined in [RFC7515#4.1.4](https://tools.ietf.org/html/rfc7515#section-4.1.4).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub kid: Option<String>,
    /// X.509 URL
    ///
    /// Defined in [RFC7515#4.1.5](https://tools.ietf.org/html/rfc7515#section-4.1.5).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub x5u: Option<String>,
    /// X.509 certificate thumbprint
    ///
    /// Defined in [RFC7515#4.1.7](https://tools.ietf.org/html/rfc7515#section-4.1.7).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub x5t: Option<String>,
}

impl Header {
    /// Returns a JWT header with the algorithm given
    pub fn new(algorithm: Algorithm) -> Header {
        Header {
            typ: Some("JWT".to_string()),
            alg: algorithm,
            cty: None,
            jku: None,
            kid: None,
            x5u: None,
            x5t: None,
        }
    }
}

impl Default for Header {
    /// Returns a JWT header using the default Algorithm, HS256
    fn default() -> Self {
        Header::new(Algorithm::default())
    }
}
```
@@ -1,142 +0,0 @@
|
|||||||
//! Create and parses JWT (JSON Web Tokens)
|
|
||||||
//!
|
|
||||||
//! Documentation: [stable](https://docs.rs/jsonwebtoken/)
|
|
||||||
#![recursion_limit = "300"]
|
|
||||||
#![deny(missing_docs)]
|
|
||||||
#![allow(unused_doc_comments)]
|
|
||||||
#![allow(renamed_and_removed_lints)]
|
|
||||||
|
|
||||||
#[macro_use]
|
|
||||||
extern crate error_chain;
|
|
||||||
#[macro_use]
|
|
||||||
extern crate serde_derive;
|
|
||||||
extern crate serde_json;
|
|
||||||
extern crate serde;
|
|
||||||
extern crate base64;
|
|
||||||
extern crate ring;
|
|
||||||
extern crate untrusted;
|
|
||||||
extern crate chrono;
|
|
||||||
|
|
||||||
/// All the errors, generated using error-chain
|
|
||||||
pub mod errors;
|
|
||||||
mod header;
|
|
||||||
mod crypto;
|
|
||||||
mod serialization;
|
|
||||||
mod validation;
|
|
||||||
|
|
||||||
pub use header::Header;
|
|
||||||
pub use crypto::{
|
|
||||||
Algorithm,
|
|
||||||
sign,
|
|
||||||
verify,
|
|
||||||
};
|
|
||||||
pub use validation::Validation;
|
|
||||||
pub use serialization::TokenData;
|
|
||||||
|
|
||||||
|
|
||||||
use serde::de::DeserializeOwned;
|
|
||||||
use serde::ser::Serialize;
|
|
||||||
|
|
||||||
use errors::{Result, ErrorKind};
|
|
||||||
use serialization::{from_jwt_part, from_jwt_part_claims, to_jwt_part};
|
|
||||||
use validation::{validate};
|
|
||||||
|
|
||||||
|
|
||||||
/// Encode the header and claims given and sign the payload using the algorithm from the header and the key
|
|
||||||
///
|
|
||||||
/// ```rust,ignore
|
|
||||||
/// #[macro_use]
|
|
||||||
/// extern crate serde_derive;
|
|
||||||
/// use jsonwebtoken::{encode, Algorithm, Header};
|
|
||||||
///
|
|
||||||
/// /// #[derive(Debug, Serialize, Deserialize)]
|
|
||||||
/// struct Claims {
|
|
||||||
/// sub: String,
|
|
||||||
/// company: String
|
|
||||||
/// }
|
|
||||||
///
|
|
||||||
/// let my_claims = Claims {
|
|
||||||
/// sub: "b@b.com".to_owned(),
|
|
||||||
/// company: "ACME".to_owned()
|
|
||||||
/// };
|
|
||||||
///
|
|
||||||
/// // my_claims is a struct that implements Serialize
|
|
||||||
/// // This will create a JWT using HS256 as algorithm
|
|
||||||
/// let token = encode(&Header::default(), &my_claims, "secret".as_ref()).unwrap();
|
|
||||||
/// ```
|
|
||||||
pub fn encode<T: Serialize>(header: &Header, claims: &T, key: &[u8]) -> Result<String> {
|
|
||||||
let encoded_header = to_jwt_part(&header)?;
|
|
||||||
let encoded_claims = to_jwt_part(&claims)?;
|
|
||||||
let signing_input = [encoded_header.as_ref(), encoded_claims.as_ref()].join(".");
|
|
||||||
let signature = sign(&*signing_input, key.as_ref(), header.alg)?;
|
|
||||||
|
|
||||||
Ok([signing_input, signature].join("."))
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Used in decode: takes the result of a rsplit and ensure we only get 2 parts
|
|
||||||
/// Errors if we don't
|
|
||||||
macro_rules! expect_two {
|
|
||||||
($iter:expr) => {{
|
|
||||||
let mut i = $iter;
|
|
||||||
match (i.next(), i.next(), i.next()) {
|
|
||||||
(Some(first), Some(second), None) => (first, second),
|
|
||||||
_ => return Err(ErrorKind::InvalidToken.into())
|
|
||||||
}
|
|
||||||
}}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Decode a token into a struct containing 2 fields: `claims` and `header`.
|
|
||||||
///
|
|
||||||
/// If the token or its signature is invalid or the claims fail validation, it will return an error.
|
|
||||||
///
|
|
||||||
/// ```rust,ignore
|
|
||||||
/// #[macro_use]
|
|
||||||
/// extern crate serde_derive;
|
|
||||||
/// use jsonwebtoken::{decode, Validation, Algorithm};
|
|
||||||
///
|
|
||||||
/// #[derive(Debug, Serialize, Deserialize)]
|
|
||||||
/// struct Claims {
|
|
||||||
/// sub: String,
|
|
||||||
/// company: String
|
|
||||||
/// }
|
|
||||||
///
|
|
||||||
/// let token = "a.jwt.token".to_string();
|
|
||||||
/// // Claims is a struct that implements Deserialize
|
|
||||||
/// let token_data = decode::<Claims>(&token, "secret", &Validation::new(Algorithm::HS256));
|
|
||||||
/// ```
|
|
||||||
pub fn decode<T: DeserializeOwned>(token: &str, key: &[u8], validation: &Validation) -> Result<TokenData<T>> {
|
|
||||||
let (signature, signing_input) = expect_two!(token.rsplitn(2, '.'));
|
|
||||||
let (claims, header) = expect_two!(signing_input.rsplitn(2, '.'));
|
|
||||||
let header: Header = from_jwt_part(header)?;
|
|
||||||
|
|
||||||
if !verify(signature, signing_input, key, header.alg)? {
|
|
||||||
return Err(ErrorKind::InvalidSignature.into());
|
|
||||||
}
|
|
||||||
|
|
||||||
if !validation.algorithms.contains(&header.alg) {
|
|
||||||
return Err(ErrorKind::InvalidAlgorithm.into());
|
|
||||||
}
|
|
||||||
|
|
||||||
let (decoded_claims, claims_map): (T, _) = from_jwt_part_claims(claims)?;
|
|
||||||
|
|
||||||
validate(&claims_map, validation)?;
|
|
||||||
|
|
||||||
Ok(TokenData { header: header, claims: decoded_claims })
|
|
||||||
}
|
|

/// Decode a token and return the Header. This is not doing any kind of validation: it is meant to be
/// used when you don't know which `alg` the token is using and want to find out.
///
/// If the token has an invalid format, it will return an error.
///
/// ```rust,ignore
/// use jsonwebtoken::decode_header;
///
/// let token = "a.jwt.token".to_string();
/// let header = decode_header(&token);
/// ```
pub fn decode_header(token: &str) -> Result<Header> {
    let (_, signing_input) = expect_two!(token.rsplitn(2, '.'));
    let (_, header) = expect_two!(signing_input.rsplitn(2, '.'));
    from_jwt_part(header)
}
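
The intended two-step pattern, sketched with hedged assumptions (token and key are placeholders): peek at the header to learn the algorithm, then decode with a validation pinned to it.

    // Discover `alg` first, then validate against exactly that algorithm.
    // Note: only do this if the discovered alg is then checked against an
    // allow-list; blindly trusting the header re-opens algorithm confusion.
    let header = decode_header(&token)?;
    let validation = Validation::new(header.alg);
    let data = decode::<Claims>(&token, "secret".as_ref(), &validation)?;
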
@@ -1,42 +0,0 @@
use base64;
use serde::de::DeserializeOwned;
use serde::ser::Serialize;
use serde_json::{from_str, to_string, Value};
use serde_json::map::Map;

use errors::{Result};
use header::Header;


/// The return type of a successful call to decode
#[derive(Debug)]
pub struct TokenData<T> {
    /// The decoded JWT header
    pub header: Header,
    /// The decoded JWT claims
    pub claims: T
}

/// Serializes to JSON and encodes to base64
pub fn to_jwt_part<T: Serialize>(input: &T) -> Result<String> {
    let encoded = to_string(input)?;
    Ok(base64::encode_config(encoded.as_bytes(), base64::URL_SAFE_NO_PAD))
}

/// Decodes from base64 and deserializes from JSON to a struct
pub fn from_jwt_part<B: AsRef<str>, T: DeserializeOwned>(encoded: B) -> Result<T> {
    let decoded = base64::decode_config(encoded.as_ref(), base64::URL_SAFE_NO_PAD)?;
    let s = String::from_utf8(decoded)?;

    Ok(from_str(&s)?)
}

/// Decodes from base64 and deserializes from JSON to a struct AND a hashmap
pub fn from_jwt_part_claims<B: AsRef<str>, T: DeserializeOwned>(encoded: B) -> Result<(T, Map<String, Value>)> {
    let decoded = base64::decode_config(encoded.as_ref(), base64::URL_SAFE_NO_PAD)?;
    let s = String::from_utf8(decoded)?;

    let claims: T = from_str(&s)?;
    let map: Map<_, _> = from_str(&s)?;
    Ok((claims, map))
}
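
A round-trip sketch of the two helpers above; the part type is illustrative and assumes the usual serde derives are in scope:

    #[derive(Serialize, Deserialize, Debug, PartialEq)]
    struct Part { sub: String }

    let part = Part { sub: "b@b.com".into() };
    let encoded = to_jwt_part(&part)?;            // JSON -> base64url, no padding
    let decoded: Part = from_jwt_part(&encoded)?; // base64url -> JSON -> struct
    assert_eq!(part, decoded);
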
@@ -1,377 +0,0 @@
use chrono::Utc;
use serde::ser::Serialize;
use serde_json::{Value, from_value, to_value};
use serde_json::map::Map;

use errors::{Result, ErrorKind};
use crypto::Algorithm;


/// Contains the various validations that are applied after decoding a token.
///
/// All time validations happen on UTC timestamps.
///
/// ```rust
/// use jsonwebtoken::Validation;
///
/// // Default value
/// let validation = Validation::default();
///
/// // Changing one parameter
/// let mut validation = Validation {leeway: 60, ..Default::default()};
///
/// // Setting audience
/// let mut validation = Validation::default();
/// validation.set_audience(&"Me"); // string
/// validation.set_audience(&["Me", "You"]); // array of strings
/// ```
#[derive(Debug, Clone, PartialEq)]
pub struct Validation {
    /// Add some leeway (in seconds) to the `exp`, `iat` and `nbf` validation to
    /// account for clock skew.
    ///
    /// Defaults to `0`.
    pub leeway: i64,
    /// Whether to validate the `exp` field.
    ///
    /// It will return an error if the time in the `exp` field is in the past.
    ///
    /// Defaults to `true`.
    pub validate_exp: bool,
    /// Whether to validate the `iat` field.
    ///
    /// It will return an error if the time in the `iat` field is in the future.
    ///
    /// Defaults to `true`.
    pub validate_iat: bool,
    /// Whether to validate the `nbf` field.
    ///
    /// It will return an error if the current timestamp is before the time in the `nbf` field.
    ///
    /// Defaults to `true`.
    pub validate_nbf: bool,
    /// If it contains a value, the validation will check that the `aud` field is the same as the
    /// one provided and will error otherwise.
    /// Since `aud` can be either a String or a Vec<String> in the JWT spec, you will need to use
    /// the [set_audience](struct.Validation.html#method.set_audience) method to set it.
    ///
    /// Defaults to `None`.
    pub aud: Option<Value>,
    /// If it contains a value, the validation will check that the `iss` field is the same as the
    /// one provided and will error otherwise.
    ///
    /// Defaults to `None`.
    pub iss: Option<String>,
    /// If it contains a value, the validation will check that the `sub` field is the same as the
    /// one provided and will error otherwise.
    ///
    /// Defaults to `None`.
    pub sub: Option<String>,
    /// If it contains a value, the validation will check that the `alg` of the header is contained
    /// in the ones provided and will error otherwise.
    ///
    /// Defaults to `vec![Algorithm::HS256]`.
    pub algorithms: Vec<Algorithm>,
}

impl Validation {
    /// Create a default validation setup allowing the given alg
    pub fn new(alg: Algorithm) -> Validation {
        let mut validation = Validation::default();
        validation.algorithms = vec![alg];
        validation
    }

    /// Since `aud` can be either a String or an array of String in the JWT spec, this method will take
    /// care of serializing the value.
    pub fn set_audience<T: Serialize>(&mut self, audience: &T) {
        self.aud = Some(to_value(audience).unwrap());
    }
}

impl Default for Validation {
    fn default() -> Validation {
        Validation {
            leeway: 0,

            validate_exp: true,
            validate_iat: true,
            validate_nbf: true,

            iss: None,
            sub: None,
            aud: None,

            algorithms: vec![Algorithm::HS256],
        }
    }
}
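
Beyond the doc-comment examples, a hedged sketch of a stricter policy, assuming the crate's Algorithm enum includes RS256 as upstream jsonwebtoken does; the issuer URL is a placeholder:

    let mut validation = Validation::new(Algorithm::RS256); // algorithms allow-list: RS256 only
    validation.iss = Some("https://issuer.example.com".to_string()); // placeholder issuer
    validation.leeway = 30; // tolerate 30 seconds of clock skew on exp/iat/nbf
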

pub fn validate(claims: &Map<String, Value>, options: &Validation) -> Result<()> {
    let now = Utc::now().timestamp();

    if let Some(iat) = claims.get("iat") {
        if options.validate_iat && from_value::<i64>(iat.clone())? > now + options.leeway {
            return Err(ErrorKind::InvalidIssuedAt.into());
        }
    }

    if let Some(exp) = claims.get("exp") {
        if options.validate_exp && from_value::<i64>(exp.clone())? < now - options.leeway {
            return Err(ErrorKind::ExpiredSignature.into());
        }
    }

    if let Some(nbf) = claims.get("nbf") {
        if options.validate_nbf && from_value::<i64>(nbf.clone())? > now + options.leeway {
            return Err(ErrorKind::ImmatureSignature.into());
        }
    }

    if let Some(iss) = claims.get("iss") {
        if let Some(ref correct_iss) = options.iss {
            if from_value::<String>(iss.clone())? != *correct_iss {
                return Err(ErrorKind::InvalidIssuer.into());
            }
        }
    }

    if let Some(sub) = claims.get("sub") {
        if let Some(ref correct_sub) = options.sub {
            if from_value::<String>(sub.clone())? != *correct_sub {
                return Err(ErrorKind::InvalidSubject.into());
            }
        }
    }

    if let Some(aud) = claims.get("aud") {
        if let Some(ref correct_aud) = options.aud {
            if aud != correct_aud {
                return Err(ErrorKind::InvalidAudience.into());
            }
        }
    }

    Ok(())
}
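
A sketch of how `validate` sees claims, mirroring the tests below: the payload arrives as a `serde_json` map, so any JSON token body can be checked without a typed struct. Values are illustrative:

    let mut claims = Map::new();
    claims.insert("exp".to_string(), to_value(Utc::now().timestamp() + 3600).unwrap());
    claims.insert("iss".to_string(), to_value("Keats").unwrap());

    let validation = Validation { iss: Some("Keats".to_string()), ..Default::default() };
    validate(&claims, &validation)?; // Ok(()) when exp is in the future and iss matches
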

#[cfg(test)]
mod tests {
    use serde_json::{to_value};
    use serde_json::map::Map;
    use chrono::Utc;

    use super::{validate, Validation};

    use errors::ErrorKind;

    #[test]
    fn iat_in_past_ok() {
        let mut claims = Map::new();
        claims.insert("iat".to_string(), to_value(Utc::now().timestamp() - 10000).unwrap());
        let res = validate(&claims, &Validation::default());
        assert!(res.is_ok());
    }

    #[test]
    fn iat_in_future_fails() {
        let mut claims = Map::new();
        claims.insert("iat".to_string(), to_value(Utc::now().timestamp() + 100000).unwrap());
        let res = validate(&claims, &Validation::default());
        assert!(res.is_err());

        match res.unwrap_err().kind() {
            &ErrorKind::InvalidIssuedAt => (),
            _ => assert!(false),
        };
    }

    #[test]
    fn iat_in_future_but_in_leeway_ok() {
        let mut claims = Map::new();
        claims.insert("iat".to_string(), to_value(Utc::now().timestamp() + 50).unwrap());
        let validation = Validation {
            leeway: 1000 * 60,
            ..Default::default()
        };
        let res = validate(&claims, &validation);
        assert!(res.is_ok());
    }

    #[test]
    fn exp_in_future_ok() {
        let mut claims = Map::new();
        claims.insert("exp".to_string(), to_value(Utc::now().timestamp() + 10000).unwrap());
        let res = validate(&claims, &Validation::default());
        assert!(res.is_ok());
    }

    #[test]
    fn exp_in_past_fails() {
        let mut claims = Map::new();
        claims.insert("exp".to_string(), to_value(Utc::now().timestamp() - 100000).unwrap());
        let res = validate(&claims, &Validation::default());
        assert!(res.is_err());

        match res.unwrap_err().kind() {
            &ErrorKind::ExpiredSignature => (),
            _ => assert!(false),
        };
    }

    #[test]
    fn exp_in_past_but_in_leeway_ok() {
        let mut claims = Map::new();
        claims.insert("exp".to_string(), to_value(Utc::now().timestamp() - 500).unwrap());
        let validation = Validation {
            leeway: 1000 * 60,
            ..Default::default()
        };
        let res = validate(&claims, &validation);
        assert!(res.is_ok());
    }

    #[test]
    fn nbf_in_past_ok() {
        let mut claims = Map::new();
        claims.insert("nbf".to_string(), to_value(Utc::now().timestamp() - 10000).unwrap());
        let res = validate(&claims, &Validation::default());
        assert!(res.is_ok());
    }

    #[test]
    fn nbf_in_future_fails() {
        let mut claims = Map::new();
        claims.insert("nbf".to_string(), to_value(Utc::now().timestamp() + 100000).unwrap());
        let res = validate(&claims, &Validation::default());
        assert!(res.is_err());

        match res.unwrap_err().kind() {
            &ErrorKind::ImmatureSignature => (),
            _ => assert!(false),
        };
    }

    #[test]
    fn nbf_in_future_but_in_leeway_ok() {
        let mut claims = Map::new();
        claims.insert("nbf".to_string(), to_value(Utc::now().timestamp() + 500).unwrap());
        let validation = Validation {
            leeway: 1000 * 60,
            ..Default::default()
        };
        let res = validate(&claims, &validation);
        assert!(res.is_ok());
    }

    #[test]
    fn iss_ok() {
        let mut claims = Map::new();
        claims.insert("iss".to_string(), to_value("Keats").unwrap());
        let validation = Validation {
            iss: Some("Keats".to_string()),
            ..Default::default()
        };
        let res = validate(&claims, &validation);
        assert!(res.is_ok());
    }

    #[test]
    fn iss_not_matching_fails() {
        let mut claims = Map::new();
        claims.insert("iss".to_string(), to_value("Hacked").unwrap());
        let validation = Validation {
            iss: Some("Keats".to_string()),
            ..Default::default()
        };
        let res = validate(&claims, &validation);
        assert!(res.is_err());

        match res.unwrap_err().kind() {
            &ErrorKind::InvalidIssuer => (),
            _ => assert!(false),
        };
    }

    #[test]
    fn sub_ok() {
        let mut claims = Map::new();
        claims.insert("sub".to_string(), to_value("Keats").unwrap());
        let validation = Validation {
            sub: Some("Keats".to_string()),
            ..Default::default()
        };
        let res = validate(&claims, &validation);
        assert!(res.is_ok());
    }

    #[test]
    fn sub_not_matching_fails() {
        let mut claims = Map::new();
        claims.insert("sub".to_string(), to_value("Hacked").unwrap());
        let validation = Validation {
            sub: Some("Keats".to_string()),
            ..Default::default()
        };
        let res = validate(&claims, &validation);
        assert!(res.is_err());

        match res.unwrap_err().kind() {
            &ErrorKind::InvalidSubject => (),
            _ => assert!(false),
        };
    }

    #[test]
    fn aud_string_ok() {
        let mut claims = Map::new();
        claims.insert("aud".to_string(), to_value("Everyone").unwrap());
        let mut validation = Validation::default();
        validation.set_audience(&"Everyone");
        let res = validate(&claims, &validation);
        assert!(res.is_ok());
    }

    #[test]
    fn aud_array_of_string_ok() {
        let mut claims = Map::new();
        claims.insert("aud".to_string(), to_value(["UserA", "UserB"]).unwrap());
        let mut validation = Validation::default();
        validation.set_audience(&["UserA", "UserB"]);
        let res = validate(&claims, &validation);
        assert!(res.is_ok());
    }

    #[test]
    fn aud_type_mismatch_fails() {
        let mut claims = Map::new();
        claims.insert("aud".to_string(), to_value("Everyone").unwrap());
        let mut validation = Validation::default();
        validation.set_audience(&["UserA", "UserB"]);
        let res = validate(&claims, &validation);
        assert!(res.is_err());

        match res.unwrap_err().kind() {
            &ErrorKind::InvalidAudience => (),
            _ => assert!(false),
        };
    }

    #[test]
    fn aud_correct_type_not_matching_fails() {
        let mut claims = Map::new();
        claims.insert("aud".to_string(), to_value("Everyone").unwrap());
        let mut validation = Validation::default();
        validation.set_audience(&"None");
        let res = validate(&claims, &validation);
        assert!(res.is_err());

        match res.unwrap_err().kind() {
            &ErrorKind::InvalidAudience => (),
            _ => assert!(false),
        };
    }
}
62  migrations/mysql/2018-01-14-171611_create_tables/up.sql  Normal file
@@ -0,0 +1,62 @@
CREATE TABLE users (
  uuid                CHAR(36) NOT NULL PRIMARY KEY,
  created_at          DATETIME NOT NULL,
  updated_at          DATETIME NOT NULL,
  email               VARCHAR(255) NOT NULL UNIQUE,
  name                TEXT NOT NULL,
  password_hash       BLOB NOT NULL,
  salt                BLOB NOT NULL,
  password_iterations INTEGER NOT NULL,
  password_hint       TEXT,
  `key`               TEXT NOT NULL,
  private_key         TEXT,
  public_key          TEXT,
  totp_secret         TEXT,
  totp_recover        TEXT,
  security_stamp      TEXT NOT NULL,
  equivalent_domains  TEXT NOT NULL,
  excluded_globals    TEXT NOT NULL
);

CREATE TABLE devices (
  uuid          CHAR(36) NOT NULL PRIMARY KEY,
  created_at    DATETIME NOT NULL,
  updated_at    DATETIME NOT NULL,
  user_uuid     CHAR(36) NOT NULL REFERENCES users (uuid),
  name          TEXT NOT NULL,
  type          INTEGER NOT NULL,
  push_token    TEXT,
  refresh_token TEXT NOT NULL
);

CREATE TABLE ciphers (
  uuid              CHAR(36) NOT NULL PRIMARY KEY,
  created_at        DATETIME NOT NULL,
  updated_at        DATETIME NOT NULL,
  user_uuid         CHAR(36) NOT NULL REFERENCES users (uuid),
  folder_uuid       CHAR(36) REFERENCES folders (uuid),
  organization_uuid CHAR(36),
  type              INTEGER NOT NULL,
  name              TEXT NOT NULL,
  notes             TEXT,
  fields            TEXT,
  data              TEXT NOT NULL,
  favorite          BOOLEAN NOT NULL
);

CREATE TABLE attachments (
  id          CHAR(36) NOT NULL PRIMARY KEY,
  cipher_uuid CHAR(36) NOT NULL REFERENCES ciphers (uuid),
  file_name   TEXT NOT NULL,
  file_size   INTEGER NOT NULL
);

CREATE TABLE folders (
  uuid       CHAR(36) NOT NULL PRIMARY KEY,
  created_at DATETIME NOT NULL,
  updated_at DATETIME NOT NULL,
  user_uuid  CHAR(36) NOT NULL REFERENCES users (uuid),
  name       TEXT NOT NULL
);
@@ -0,0 +1,30 @@
CREATE TABLE collections (
  uuid     VARCHAR(40) NOT NULL PRIMARY KEY,
  org_uuid VARCHAR(40) NOT NULL REFERENCES organizations (uuid),
  name     TEXT NOT NULL
);

CREATE TABLE organizations (
  uuid          VARCHAR(40) NOT NULL PRIMARY KEY,
  name          TEXT NOT NULL,
  billing_email TEXT NOT NULL
);

CREATE TABLE users_collections (
  user_uuid       CHAR(36) NOT NULL REFERENCES users (uuid),
  collection_uuid CHAR(36) NOT NULL REFERENCES collections (uuid),
  PRIMARY KEY (user_uuid, collection_uuid)
);

CREATE TABLE users_organizations (
  uuid       CHAR(36) NOT NULL PRIMARY KEY,
  user_uuid  CHAR(36) NOT NULL REFERENCES users (uuid),
  org_uuid   CHAR(36) NOT NULL REFERENCES organizations (uuid),

  access_all BOOLEAN NOT NULL,
  `key`      TEXT NOT NULL,
  status     INTEGER NOT NULL,
  type       INTEGER NOT NULL,

  UNIQUE (user_uuid, org_uuid)
);
@@ -0,0 +1,34 @@
ALTER TABLE ciphers RENAME TO oldCiphers;

CREATE TABLE ciphers (
  uuid              CHAR(36) NOT NULL PRIMARY KEY,
  created_at        DATETIME NOT NULL,
  updated_at        DATETIME NOT NULL,
  user_uuid         CHAR(36) REFERENCES users (uuid),         -- Make this optional
  organization_uuid CHAR(36) REFERENCES organizations (uuid), -- Add reference to orgs table
  -- Remove folder_uuid
  type              INTEGER NOT NULL,
  name              TEXT NOT NULL,
  notes             TEXT,
  fields            TEXT,
  data              TEXT NOT NULL,
  favorite          BOOLEAN NOT NULL
);

CREATE TABLE folders_ciphers (
  cipher_uuid CHAR(36) NOT NULL REFERENCES ciphers (uuid),
  folder_uuid CHAR(36) NOT NULL REFERENCES folders (uuid),

  PRIMARY KEY (cipher_uuid, folder_uuid)
);

INSERT INTO ciphers (uuid, created_at, updated_at, user_uuid, organization_uuid, type, name, notes, fields, data, favorite)
SELECT uuid, created_at, updated_at, user_uuid, organization_uuid, type, name, notes, fields, data, favorite FROM oldCiphers;

INSERT INTO folders_ciphers (cipher_uuid, folder_uuid)
SELECT uuid, folder_uuid FROM oldCiphers WHERE folder_uuid IS NOT NULL;


DROP TABLE oldCiphers;

ALTER TABLE users_collections ADD COLUMN read_only BOOLEAN NOT NULL DEFAULT 0; -- False
@@ -0,0 +1,5 @@
CREATE TABLE ciphers_collections (
  cipher_uuid     CHAR(36) NOT NULL REFERENCES ciphers (uuid),
  collection_uuid CHAR(36) NOT NULL REFERENCES collections (uuid),
  PRIMARY KEY (cipher_uuid, collection_uuid)
);
@@ -0,0 +1,14 @@
ALTER TABLE attachments RENAME TO oldAttachments;

CREATE TABLE attachments (
  id          CHAR(36) NOT NULL PRIMARY KEY,
  cipher_uuid CHAR(36) NOT NULL REFERENCES ciphers (uuid),
  file_name   TEXT NOT NULL,
  file_size   INTEGER NOT NULL
);

INSERT INTO attachments (id, cipher_uuid, file_name, file_size)
SELECT id, cipher_uuid, file_name, file_size FROM oldAttachments;

DROP TABLE oldAttachments;
@@ -0,0 +1,15 @@
CREATE TABLE twofactor (
  uuid      CHAR(36) NOT NULL PRIMARY KEY,
  user_uuid CHAR(36) NOT NULL REFERENCES users (uuid),
  type      INTEGER NOT NULL,
  enabled   BOOLEAN NOT NULL,
  data      TEXT NOT NULL,

  UNIQUE (user_uuid, type)
);


INSERT INTO twofactor (uuid, user_uuid, type, enabled, data)
SELECT UUID(), uuid, 0, 1, u.totp_secret FROM users u where u.totp_secret IS NOT NULL;

UPDATE users SET totp_secret = NULL; -- Instead of recreating the table, just leave the columns empty
3  migrations/mysql/2018-08-27-172114_update_ciphers/up.sql  Normal file
@@ -0,0 +1,3 @@
ALTER TABLE ciphers
ADD COLUMN
password_history TEXT;

1  migrations/mysql/2018-09-10-111213_add_invites/down.sql  Normal file
@@ -0,0 +1 @@
DROP TABLE invitations;

3  migrations/mysql/2018-09-10-111213_add_invites/up.sql  Normal file
@@ -0,0 +1,3 @@
CREATE TABLE invitations (
  email VARCHAR(255) NOT NULL PRIMARY KEY
);
@@ -0,0 +1,7 @@
ALTER TABLE users
ADD COLUMN
client_kdf_type INTEGER NOT NULL DEFAULT 0; -- PBKDF2

ALTER TABLE users
ADD COLUMN
client_kdf_iter INTEGER NOT NULL DEFAULT 5000;
@@ -0,0 +1,3 @@
ALTER TABLE attachments
ADD COLUMN
`key` TEXT;
@@ -0,0 +1,7 @@
ALTER TABLE attachments CHANGE COLUMN akey `key` TEXT;
ALTER TABLE ciphers CHANGE COLUMN atype type INTEGER NOT NULL;
ALTER TABLE devices CHANGE COLUMN atype type INTEGER NOT NULL;
ALTER TABLE twofactor CHANGE COLUMN atype type INTEGER NOT NULL;
ALTER TABLE users CHANGE COLUMN akey `key` TEXT;
ALTER TABLE users_organizations CHANGE COLUMN akey `key` TEXT;
ALTER TABLE users_organizations CHANGE COLUMN atype type INTEGER NOT NULL;
@@ -0,0 +1,7 @@
ALTER TABLE attachments CHANGE COLUMN `key` akey TEXT;
ALTER TABLE ciphers CHANGE COLUMN type atype INTEGER NOT NULL;
ALTER TABLE devices CHANGE COLUMN type atype INTEGER NOT NULL;
ALTER TABLE twofactor CHANGE COLUMN type atype INTEGER NOT NULL;
ALTER TABLE users CHANGE COLUMN `key` akey TEXT;
ALTER TABLE users_organizations CHANGE COLUMN `key` akey TEXT;
ALTER TABLE users_organizations CHANGE COLUMN type atype INTEGER NOT NULL;
@@ -0,0 +1,9 @@
DROP TABLE users;

DROP TABLE devices;

DROP TABLE ciphers;

DROP TABLE attachments;

DROP TABLE folders;

@@ -0,0 +1,8 @@
DROP TABLE collections;

DROP TABLE organizations;


DROP TABLE users_collections;

DROP TABLE users_organizations;

@@ -0,0 +1 @@
DROP TABLE ciphers_collections;

@@ -0,0 +1 @@
-- This file should undo anything in `up.sql`
@@ -0,0 +1,3 @@
ALTER TABLE devices
ADD COLUMN
twofactor_remember TEXT;

@@ -0,0 +1,8 @@
UPDATE users
SET totp_secret = (
    SELECT twofactor.data FROM twofactor
    WHERE twofactor.type = 0
    AND twofactor.user_uuid = users.uuid
);

DROP TABLE twofactor;
@@ -0,0 +1,3 @@
ALTER TABLE ciphers
ADD COLUMN
password_history TEXT;

1  migrations/sqlite/2018-09-10-111213_add_invites/down.sql  Normal file
@@ -0,0 +1 @@
DROP TABLE invitations;

3  migrations/sqlite/2018-09-10-111213_add_invites/up.sql  Normal file
@@ -0,0 +1,3 @@
CREATE TABLE invitations (
  email TEXT NOT NULL PRIMARY KEY
);

@@ -0,0 +1,7 @@
ALTER TABLE users
ADD COLUMN
client_kdf_type INTEGER NOT NULL DEFAULT 0; -- PBKDF2

ALTER TABLE users
ADD COLUMN
client_kdf_iter INTEGER NOT NULL DEFAULT 5000;
@@ -0,0 +1,3 @@
ALTER TABLE attachments
ADD COLUMN
key TEXT;

@@ -0,0 +1,7 @@
ALTER TABLE attachments RENAME COLUMN akey TO key;
ALTER TABLE ciphers RENAME COLUMN atype TO type;
ALTER TABLE devices RENAME COLUMN atype TO type;
ALTER TABLE twofactor RENAME COLUMN atype TO type;
ALTER TABLE users RENAME COLUMN akey TO key;
ALTER TABLE users_organizations RENAME COLUMN akey TO key;
ALTER TABLE users_organizations RENAME COLUMN atype TO type;

@@ -0,0 +1,7 @@
ALTER TABLE attachments RENAME COLUMN key TO akey;
ALTER TABLE ciphers RENAME COLUMN type TO atype;
ALTER TABLE devices RENAME COLUMN type TO atype;
ALTER TABLE twofactor RENAME COLUMN type TO atype;
ALTER TABLE users RENAME COLUMN key TO akey;
ALTER TABLE users_organizations RENAME COLUMN key TO akey;
ALTER TABLE users_organizations RENAME COLUMN type TO atype;
@@ -1 +1 @@
-nightly-2018-06-26
+nightly-2019-08-18

1  rustfmt.toml  Normal file
@@ -0,0 +1 @@
max_width = 120
268  src/api/admin.rs  Normal file
@@ -0,0 +1,268 @@
use serde_json::Value;
use std::process::Command;

use rocket::http::{Cookie, Cookies, SameSite};
use rocket::request::{self, FlashMessage, Form, FromRequest, Request};
use rocket::response::{content::Html, Flash, Redirect};
use rocket::{Outcome, Route};
use rocket_contrib::json::Json;

use crate::api::{ApiResult, EmptyResult, JsonResult};
use crate::auth::{decode_admin, encode_jwt, generate_admin_claims, ClientIp};
use crate::config::ConfigBuilder;
use crate::db::{backup_database, models::*, DbConn};
use crate::error::Error;
use crate::mail;
use crate::CONFIG;

pub fn routes() -> Vec<Route> {
    if CONFIG.admin_token().is_none() && !CONFIG.disable_admin_token() {
        return routes![admin_disabled];
    }

    routes![
        admin_login,
        get_users,
        post_admin_login,
        admin_page,
        invite_user,
        delete_user,
        deauth_user,
        remove_2fa,
        update_revision_users,
        post_config,
        delete_config,
        backup_db,
    ]
}

lazy_static! {
    static ref CAN_BACKUP: bool = cfg!(feature = "sqlite") && Command::new("sqlite").arg("-version").status().is_ok();
}

#[get("/")]
fn admin_disabled() -> &'static str {
    "The admin panel is disabled, please configure the 'ADMIN_TOKEN' variable to enable it"
}

const COOKIE_NAME: &str = "BWRS_ADMIN";
const ADMIN_PATH: &str = "/admin";

const BASE_TEMPLATE: &str = "admin/base";
const VERSION: Option<&str> = option_env!("GIT_VERSION");

#[get("/", rank = 2)]
fn admin_login(flash: Option<FlashMessage>) -> ApiResult<Html<String>> {
    // If there is an error, show it
    let msg = flash.map(|msg| format!("{}: {}", msg.name(), msg.msg()));
    let json = json!({"page_content": "admin/login", "version": VERSION, "error": msg});

    // Return the page
    let text = CONFIG.render_template(BASE_TEMPLATE, &json)?;
    Ok(Html(text))
}

#[derive(FromForm)]
struct LoginForm {
    token: String,
}

#[post("/", data = "<data>")]
fn post_admin_login(data: Form<LoginForm>, mut cookies: Cookies, ip: ClientIp) -> Result<Redirect, Flash<Redirect>> {
    let data = data.into_inner();

    // If the token is invalid, redirect to login page
    if !_validate_token(&data.token) {
        error!("Invalid admin token. IP: {}", ip.ip);
        Err(Flash::error(
            Redirect::to(ADMIN_PATH),
            "Invalid admin token, please try again.",
        ))
    } else {
        // If the token received is valid, generate JWT and save it as a cookie
        let claims = generate_admin_claims();
        let jwt = encode_jwt(&claims);

        let cookie = Cookie::build(COOKIE_NAME, jwt)
            .path(ADMIN_PATH)
            .max_age(chrono::Duration::minutes(20))
            .same_site(SameSite::Strict)
            .http_only(true)
            .finish();

        cookies.add(cookie);
        Ok(Redirect::to(ADMIN_PATH))
    }
}

fn _validate_token(token: &str) -> bool {
    match CONFIG.admin_token().as_ref() {
        None => false,
        Some(t) => crate::crypto::ct_eq(t.trim(), token.trim()),
    }
}

#[derive(Serialize)]
struct AdminTemplateData {
    page_content: String,
    version: Option<&'static str>,
    users: Vec<Value>,
    config: Value,
    can_backup: bool,
}

impl AdminTemplateData {
    fn new(users: Vec<Value>) -> Self {
        Self {
            page_content: String::from("admin/page"),
            version: VERSION,
            users,
            config: CONFIG.prepare_json(),
            can_backup: *CAN_BACKUP,
        }
    }

    fn render(self) -> Result<String, Error> {
        CONFIG.render_template(BASE_TEMPLATE, &self)
    }
}

#[get("/", rank = 1)]
fn admin_page(_token: AdminToken, conn: DbConn) -> ApiResult<Html<String>> {
    let users = User::get_all(&conn);
    let users_json: Vec<Value> = users.iter().map(|u| u.to_json(&conn)).collect();

    let text = AdminTemplateData::new(users_json).render()?;
    Ok(Html(text))
}

#[derive(Deserialize, Debug)]
#[allow(non_snake_case)]
struct InviteData {
    email: String,
}

#[post("/invite", data = "<data>")]
fn invite_user(data: Json<InviteData>, _token: AdminToken, conn: DbConn) -> EmptyResult {
    let data: InviteData = data.into_inner();
    let email = data.email.clone();
    if User::find_by_mail(&data.email, &conn).is_some() {
        err!("User already exists")
    }

    if !CONFIG.invitations_allowed() {
        err!("Invitations are not allowed")
    }

    let mut user = User::new(email);
    user.save(&conn)?;

    if CONFIG.mail_enabled() {
        let org_name = "bitwarden_rs";
        mail::send_invite(&user.email, &user.uuid, None, None, &org_name, None)
    } else {
        let invitation = Invitation::new(data.email);
        invitation.save(&conn)
    }
}

#[get("/users")]
fn get_users(_token: AdminToken, conn: DbConn) -> JsonResult {
    let users = User::get_all(&conn);
    let users_json: Vec<Value> = users.iter().map(|u| u.to_json(&conn)).collect();

    Ok(Json(Value::Array(users_json)))
}

#[post("/users/<uuid>/delete")]
fn delete_user(uuid: String, _token: AdminToken, conn: DbConn) -> EmptyResult {
    let user = match User::find_by_uuid(&uuid, &conn) {
        Some(user) => user,
        None => err!("User doesn't exist"),
    };

    user.delete(&conn)
}

#[post("/users/<uuid>/deauth")]
fn deauth_user(uuid: String, _token: AdminToken, conn: DbConn) -> EmptyResult {
    let mut user = match User::find_by_uuid(&uuid, &conn) {
        Some(user) => user,
        None => err!("User doesn't exist"),
    };

    Device::delete_all_by_user(&user.uuid, &conn)?;
    user.reset_security_stamp();

    user.save(&conn)
}

#[post("/users/<uuid>/remove-2fa")]
fn remove_2fa(uuid: String, _token: AdminToken, conn: DbConn) -> EmptyResult {
    let mut user = match User::find_by_uuid(&uuid, &conn) {
        Some(user) => user,
        None => err!("User doesn't exist"),
    };

    TwoFactor::delete_all_by_user(&user.uuid, &conn)?;
    user.totp_recover = None;
    user.save(&conn)
}

#[post("/users/update_revision")]
fn update_revision_users(_token: AdminToken, conn: DbConn) -> EmptyResult {
    User::update_all_revisions(&conn)
}

#[post("/config", data = "<data>")]
fn post_config(data: Json<ConfigBuilder>, _token: AdminToken) -> EmptyResult {
    let data: ConfigBuilder = data.into_inner();
    CONFIG.update_config(data)
}

#[post("/config/delete")]
fn delete_config(_token: AdminToken) -> EmptyResult {
    CONFIG.delete_user_config()
}

#[post("/config/backup_db")]
fn backup_db(_token: AdminToken) -> EmptyResult {
    if *CAN_BACKUP {
        backup_database()
    } else {
        err!("Can't back up current DB (either it's not SQLite or the 'sqlite' binary is not present)");
    }
}

pub struct AdminToken {}

impl<'a, 'r> FromRequest<'a, 'r> for AdminToken {
    type Error = &'static str;

    fn from_request(request: &'a Request<'r>) -> request::Outcome<Self, Self::Error> {
        if CONFIG.disable_admin_token() {
            Outcome::Success(AdminToken {})
        } else {
            let mut cookies = request.cookies();

            let access_token = match cookies.get(COOKIE_NAME) {
                Some(cookie) => cookie.value(),
                None => return Outcome::Forward(()), // If there is no cookie, redirect to login
            };

            let ip = match request.guard::<ClientIp>() {
                Outcome::Success(ip) => ip.ip,
                _ => err_handler!("Error getting Client IP"),
            };

            if decode_admin(access_token).is_err() {
                // Remove admin cookie
                cookies.remove(Cookie::named(COOKIE_NAME));
                error!("Invalid or expired admin JWT. IP: {}.", ip);
                return Outcome::Forward(());
            }

            Outcome::Success(AdminToken {})
        }
    }
}
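
One detail worth calling out: `_validate_token` above compares via `crate::crypto::ct_eq` rather than `==`. As a hedged, generic illustration of why (this sketch is not the crate's actual `ct_eq` implementation):

    // A plain `==` on strings may return early at the first mismatching byte,
    // so an attacker measuring response times could recover the admin token
    // byte by byte. A constant-time comparison always walks the full length.
    fn constant_time_eq(a: &[u8], b: &[u8]) -> bool {
        if a.len() != b.len() {
            return false;
        }
        // Accumulate differences instead of branching per byte.
        let mut diff = 0u8;
        for (x, y) in a.iter().zip(b.iter()) {
            diff |= x ^ y;
        }
        diff == 0
    }
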
@@ -1,22 +1,51 @@
-use rocket_contrib::Json;
+use rocket_contrib::json::Json;
 
-use db::DbConn;
+use crate::db::models::*;
-use db::models::*;
+use crate::db::DbConn;
 
-use api::{PasswordData, JsonResult, EmptyResult, JsonUpcase, NumberOrString};
+use crate::api::{EmptyResult, JsonResult, JsonUpcase, Notify, NumberOrString, PasswordData, UpdateType};
-use auth::Headers;
+use crate::auth::{decode_invite, Headers};
+use crate::mail;
 
-use CONFIG;
+use crate::CONFIG;
 
+use rocket::Route;
+
+pub fn routes() -> Vec<Route> {
+    routes![
+        register,
+        profile,
+        put_profile,
+        post_profile,
+        get_public_keys,
+        post_keys,
+        post_password,
+        post_kdf,
+        post_rotatekey,
+        post_sstamp,
+        post_email_token,
+        post_email,
+        delete_account,
+        post_delete_account,
+        revision_date,
+        password_hint,
+        prelogin,
+    ]
+}
+
 #[derive(Deserialize, Debug)]
 #[allow(non_snake_case)]
 struct RegisterData {
     Email: String,
+    Kdf: Option<i32>,
+    KdfIterations: Option<i32>,
     Key: String,
     Keys: Option<KeysData>,
     MasterPasswordHash: String,
     MasterPasswordHint: Option<String>,
     Name: Option<String>,
+    Token: Option<String>,
+    OrganizationUserId: Option<String>,
 }
 
 #[derive(Deserialize, Debug)]
@@ -30,15 +59,54 @@ struct KeysData {
 fn register(data: JsonUpcase<RegisterData>, conn: DbConn) -> EmptyResult {
     let data: RegisterData = data.into_inner().data;
 
-    if !CONFIG.signups_allowed {
-        err!("Signups not allowed")
+    let mut user = match User::find_by_mail(&data.Email, &conn) {
+        Some(user) => {
+            if !user.password_hash.is_empty() {
+                err!("User already exists")
+            }
+
+            if let Some(token) = data.Token {
+                let claims = decode_invite(&token)?;
+                if claims.email == data.Email {
+                    user
+                } else {
+                    err!("Registration email does not match invite email")
+                }
+            } else if Invitation::take(&data.Email, &conn) {
+                for mut user_org in UserOrganization::find_invited_by_user(&user.uuid, &conn).iter_mut() {
+                    user_org.status = UserOrgStatus::Accepted as i32;
+                    user_org.save(&conn)?;
+                }
+
+                user
+            } else if CONFIG.signups_allowed() {
+                err!("Account with this email already exists")
+            } else {
+                err!("Registration not allowed")
+            }
+        }
+        None => {
+            if CONFIG.signups_allowed() || Invitation::take(&data.Email, &conn) {
+                User::new(data.Email.clone())
+            } else {
+                err!("Registration not allowed")
+            }
+        }
+    };
+
+    // Make sure we don't leave a lingering invitation.
+    Invitation::take(&data.Email, &conn);
+
+    if let Some(client_kdf_iter) = data.KdfIterations {
+        user.client_kdf_iter = client_kdf_iter;
     }
 
-    if User::find_by_mail(&data.Email, &conn).is_some() {
-        err!("Email already exists")
+    if let Some(client_kdf_type) = data.Kdf {
+        user.client_kdf_type = client_kdf_type;
     }
 
-    let mut user = User::new(data.Email, data.Key, data.MasterPasswordHash);
+    user.set_password(&data.MasterPasswordHash);
+    user.akey = data.Key;
 
     // Add extra fields if present
     if let Some(name) = data.Name {
@@ -54,9 +122,7 @@ fn register(data: JsonUpcase<RegisterData>, conn: DbConn) -> EmptyResult {
         user.public_key = Some(keys.PublicKey);
     }
 
-    user.save(&conn);
-
-    Ok(())
+    user.save(&conn)
 }
 
 #[get("/accounts/profile")]
@@ -68,11 +134,16 @@ fn profile(headers: Headers, conn: DbConn) -> JsonResult {
 #[allow(non_snake_case)]
 struct ProfileData {
     #[serde(rename = "Culture")]
     _Culture: String, // Ignored, always use en-US
     MasterPasswordHint: Option<String>,
     Name: String,
 }
 
+#[put("/accounts/profile", data = "<data>")]
+fn put_profile(data: JsonUpcase<ProfileData>, headers: Headers, conn: DbConn) -> JsonResult {
+    post_profile(data, headers, conn)
+}
+
 #[post("/accounts/profile", data = "<data>")]
 fn post_profile(data: JsonUpcase<ProfileData>, headers: Headers, conn: DbConn) -> JsonResult {
     let data: ProfileData = data.into_inner().data;
@@ -80,9 +151,11 @@ fn post_profile(data: JsonUpcase<ProfileData>, headers: Headers, conn: DbConn) -
     let mut user = headers.user;
 
     user.name = data.Name;
-    user.password_hint = data.MasterPasswordHint;
-    user.save(&conn);
+    user.password_hint = match data.MasterPasswordHint {
+        Some(ref h) if h.is_empty() => None,
+        _ => data.MasterPasswordHint,
+    };
 
+    user.save(&conn)?;
     Ok(Json(user.to_json(&conn)))
 }
@@ -90,7 +163,7 @@ fn post_profile(data: JsonUpcase<ProfileData>, headers: Headers, conn: DbConn) -
 fn get_public_keys(uuid: String, _headers: Headers, conn: DbConn) -> JsonResult {
     let user = match User::find_by_uuid(&uuid, &conn) {
         Some(user) => user,
-        None => err!("User doesn't exist")
+        None => err!("User doesn't exist"),
     };
 
     Ok(Json(json!({
@@ -109,8 +182,7 @@ fn post_keys(data: JsonUpcase<KeysData>, headers: Headers, conn: DbConn) -> Json
     user.private_key = Some(data.EncryptedPrivateKey);
     user.public_key = Some(data.PublicKey);
 
-    user.save(&conn);
-
+    user.save(&conn)?;
     Ok(Json(user.to_json(&conn)))
 }
@@ -132,10 +204,113 @@ fn post_password(data: JsonUpcase<ChangePassData>, headers: Headers, conn: DbCon
     }
 
     user.set_password(&data.NewMasterPasswordHash);
-    user.key = data.Key;
-    user.save(&conn);
-
-    Ok(())
+    user.akey = data.Key;
+    user.save(&conn)
+}
+
+#[derive(Deserialize)]
+#[allow(non_snake_case)]
+struct ChangeKdfData {
+    Kdf: i32,
+    KdfIterations: i32,
+
+    MasterPasswordHash: String,
+    NewMasterPasswordHash: String,
+    Key: String,
+}
+
+#[post("/accounts/kdf", data = "<data>")]
+fn post_kdf(data: JsonUpcase<ChangeKdfData>, headers: Headers, conn: DbConn) -> EmptyResult {
+    let data: ChangeKdfData = data.into_inner().data;
+    let mut user = headers.user;
+
+    if !user.check_valid_password(&data.MasterPasswordHash) {
+        err!("Invalid password")
+    }
+
+    user.client_kdf_iter = data.KdfIterations;
+    user.client_kdf_type = data.Kdf;
+    user.set_password(&data.NewMasterPasswordHash);
+    user.akey = data.Key;
+    user.save(&conn)
+}
+
+#[derive(Deserialize)]
+#[allow(non_snake_case)]
+struct UpdateFolderData {
+    Id: String,
+    Name: String,
+}
+
+use super::ciphers::CipherData;
+
+#[derive(Deserialize)]
+#[allow(non_snake_case)]
+struct KeyData {
+    Ciphers: Vec<CipherData>,
+    Folders: Vec<UpdateFolderData>,
+    Key: String,
+    PrivateKey: String,
+    MasterPasswordHash: String,
+}
+
+#[post("/accounts/key", data = "<data>")]
+fn post_rotatekey(data: JsonUpcase<KeyData>, headers: Headers, conn: DbConn, nt: Notify) -> EmptyResult {
+    let data: KeyData = data.into_inner().data;
+
+    if !headers.user.check_valid_password(&data.MasterPasswordHash) {
+        err!("Invalid password")
+    }
+
+    let user_uuid = &headers.user.uuid;
+
+    // Update folder data
+    for folder_data in data.Folders {
+        let mut saved_folder = match Folder::find_by_uuid(&folder_data.Id, &conn) {
+            Some(folder) => folder,
+            None => err!("Folder doesn't exist"),
+        };
+
+        if &saved_folder.user_uuid != user_uuid {
+            err!("The folder is not owned by the user")
+        }
+
+        saved_folder.name = folder_data.Name;
+        saved_folder.save(&conn)?
+    }
+
+    // Update cipher data
+    use super::ciphers::update_cipher_from_data;
+
+    for cipher_data in data.Ciphers {
+        let mut saved_cipher = match Cipher::find_by_uuid(cipher_data.Id.as_ref().unwrap(), &conn) {
+            Some(cipher) => cipher,
+            None => err!("Cipher doesn't exist"),
+        };
+
+        if saved_cipher.user_uuid.as_ref().unwrap() != user_uuid {
+            err!("The cipher is not owned by the user")
+        }
+
+        update_cipher_from_data(
+            &mut saved_cipher,
+            cipher_data,
+            &headers,
+            false,
+            &conn,
+            &nt,
+            UpdateType::CipherUpdate,
+        )?
+    }
+
+    // Update user data
+    let mut user = headers.user;
+
+    user.akey = data.Key;
+    user.private_key = Some(data.PrivateKey);
+    user.reset_security_stamp();
+
+    user.save(&conn)
 }
 
 #[post("/accounts/security-stamp", data = "<data>")]
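
For orientation, a sketch of the request body `post_rotatekey` accepts, inferred from the `KeyData` struct in the hunk above; field names follow the upcased serde names (`JsonUpcase`), and every value is a placeholder:

    // Hypothetical POST /accounts/key body. Values are placeholders only.
    let body = serde_json::json!({
        "MasterPasswordHash": "<hash of the current master password>",
        "Key": "<user key re-encrypted under the new master key>",
        "PrivateKey": "<private key re-encrypted under the new user key>",
        "Folders": [{ "Id": "<folder uuid>", "Name": "<re-encrypted folder name>" }],
        "Ciphers": [] // re-encrypted CipherData objects, elided here
    });
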
@@ -147,10 +322,9 @@ fn post_sstamp(data: JsonUpcase<PasswordData>, headers: Headers, conn: DbConn) -
         err!("Invalid password")
     }
 
+    Device::delete_all_by_user(&user.uuid, &conn)?;
     user.reset_security_stamp();
-    user.save(&conn);
-
-    Ok(())
+    user.save(&conn)
 }
 
 #[derive(Deserialize)]
@@ -203,14 +377,17 @@ fn post_email(data: JsonUpcase<ChangeEmailData>, headers: Headers, conn: DbConn)
     user.email = data.NewEmail;
 
     user.set_password(&data.NewMasterPasswordHash);
-    user.key = data.Key;
-
-    user.save(&conn);
+    user.akey = data.Key;
 
-    Ok(())
+    user.save(&conn)
 }
 
 #[post("/accounts/delete", data = "<data>")]
+fn post_delete_account(data: JsonUpcase<PasswordData>, headers: Headers, conn: DbConn) -> EmptyResult {
+    delete_account(data, headers, conn)
+}
+
+#[delete("/accounts", data = "<data>")]
 fn delete_account(data: JsonUpcase<PasswordData>, headers: Headers, conn: DbConn) -> EmptyResult {
     let data: PasswordData = data.into_inner().data;
     let user = headers.user;
@@ -219,31 +396,60 @@ fn delete_account(data: JsonUpcase<PasswordData>, headers: Headers, conn: DbConn
         err!("Invalid password")
     }
 
-    // Delete ciphers and their attachments
-    for cipher in Cipher::find_owned_by_user(&user.uuid, &conn) {
-        if cipher.delete(&conn).is_err() {
-            err!("Failed deleting cipher")
-        }
-    }
-
-    // Delete folders
-    for f in Folder::find_by_user(&user.uuid, &conn) {
-        if f.delete(&conn).is_err() {
-            err!("Failed deleting folder")
-        }
-    }
-
-    // Delete devices
-    for d in Device::find_by_user(&user.uuid, &conn) { d.delete(&conn); }
-
-    // Delete user
-    user.delete(&conn);
-
-    Ok(())
+    user.delete(&conn)
 }
 
 #[get("/accounts/revision-date")]
 fn revision_date(headers: Headers) -> String {
-    let revision_date = headers.user.updated_at.timestamp();
+    let revision_date = headers.user.updated_at.timestamp_millis();
     revision_date.to_string()
 }
 
+#[derive(Deserialize)]
+#[allow(non_snake_case)]
+struct PasswordHintData {
+    Email: String,
+}
+
+#[post("/accounts/password-hint", data = "<data>")]
+fn password_hint(data: JsonUpcase<PasswordHintData>, conn: DbConn) -> EmptyResult {
+    let data: PasswordHintData = data.into_inner().data;
+
+    let hint = match User::find_by_mail(&data.Email, &conn) {
+        Some(user) => user.password_hint,
+        None => return Ok(()),
+    };
+
+    if CONFIG.mail_enabled() {
+        mail::send_password_hint(&data.Email, hint)?;
+    } else if CONFIG.show_password_hint() {
+        if let Some(hint) = hint {
+            err!(format!("Your password hint is: {}", &hint));
+        } else {
+            err!("Sorry, you have no password hint...");
+        }
+    }
+
+    Ok(())
+}
+
+#[derive(Deserialize)]
+#[allow(non_snake_case)]
+struct PreloginData {
+    Email: String,
+}
+
+#[post("/accounts/prelogin", data = "<data>")]
+fn prelogin(data: JsonUpcase<PreloginData>, conn: DbConn) -> JsonResult {
+    let data: PreloginData = data.into_inner().data;
+
+    let (kdf_type, kdf_iter) = match User::find_by_mail(&data.Email, &conn) {
+        Some(user) => (user.client_kdf_type, user.client_kdf_iter),
+        None => (User::CLIENT_KDF_TYPE_DEFAULT, User::CLIENT_KDF_ITER_DEFAULT),
+    };
+
+    Ok(Json(json!({
+        "Kdf": kdf_type,
+        "KdfIterations": kdf_iter
+    })))
+}
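
The `prelogin` handler above implements KDF negotiation: before logging in, a client asks for the account's KDF parameters so it can derive the same master key locally. A hedged sketch of the response shape, with illustrative values:

    // Response to POST /accounts/prelogin {"Email": "user@example.com"}:
    let response = serde_json::json!({
        "Kdf": 0,           // 0 = PBKDF2, matching the client_kdf_type DEFAULT 0 migration above
        "KdfIterations": 5000,
    });
    // Unknown emails get the same defaults, which also avoids revealing
    // whether an address is registered.
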
File diff suppressed because it is too large
@@ -1,20 +1,36 @@
-use rocket_contrib::{Json, Value};
+use rocket_contrib::json::Json;
+use serde_json::Value;
 
-use db::DbConn;
+use crate::db::models::*;
-use db::models::*;
+use crate::db::DbConn;
 
-use api::{JsonResult, EmptyResult, JsonUpcase};
+use crate::api::{EmptyResult, JsonResult, JsonUpcase, Notify, UpdateType};
-use auth::Headers;
+use crate::auth::Headers;
 
+use rocket::Route;
+
+pub fn routes() -> Vec<Route> {
+    routes![
+        get_folders,
+        get_folder,
+        post_folders,
+        post_folder,
+        put_folder,
+        delete_folder_post,
+        delete_folder,
+    ]
+}
+
 #[get("/folders")]
 fn get_folders(headers: Headers, conn: DbConn) -> JsonResult {
     let folders = Folder::find_by_user(&headers.user.uuid, &conn);
 
-    let folders_json: Vec<Value> = folders.iter().map(|c| c.to_json()).collect();
+    let folders_json: Vec<Value> = folders.iter().map(Folder::to_json).collect();
 
     Ok(Json(json!({
         "Data": folders_json,
         "Object": "list",
+        "ContinuationToken": null,
     })))
 }
@@ -22,7 +38,7 @@ fn get_folders(headers: Headers, conn: DbConn) -> JsonResult {
 fn get_folder(uuid: String, headers: Headers, conn: DbConn) -> JsonResult {
     let folder = match Folder::find_by_uuid(&uuid, &conn) {
         Some(folder) => folder,
-        _ => err!("Invalid folder")
+        _ => err!("Invalid folder"),
     };
 
     if folder.user_uuid != headers.user.uuid {
@@ -36,32 +52,33 @@ fn get_folder(uuid: String, headers: Headers, conn: DbConn) -> JsonResult {
 #[allow(non_snake_case)]
 pub struct FolderData {
-    pub Name: String
+    pub Name: String,
 }
 
 #[post("/folders", data = "<data>")]
-fn post_folders(data: JsonUpcase<FolderData>, headers: Headers, conn: DbConn) -> JsonResult {
+fn post_folders(data: JsonUpcase<FolderData>, headers: Headers, conn: DbConn, nt: Notify) -> JsonResult {
     let data: FolderData = data.into_inner().data;
 
     let mut folder = Folder::new(headers.user.uuid.clone(), data.Name);
 
-    folder.save(&conn);
+    folder.save(&conn)?;
+    nt.send_folder_update(UpdateType::FolderCreate, &folder);
 
     Ok(Json(folder.to_json()))
 }
 
 #[post("/folders/<uuid>", data = "<data>")]
-fn post_folder(uuid: String, data: JsonUpcase<FolderData>, headers: Headers, conn: DbConn) -> JsonResult {
-    put_folder(uuid, data, headers, conn)
+fn post_folder(uuid: String, data: JsonUpcase<FolderData>, headers: Headers, conn: DbConn, nt: Notify) -> JsonResult {
+    put_folder(uuid, data, headers, conn, nt)
 }
 
 #[put("/folders/<uuid>", data = "<data>")]
-fn put_folder(uuid: String, data: JsonUpcase<FolderData>, headers: Headers, conn: DbConn) -> JsonResult {
+fn put_folder(uuid: String, data: JsonUpcase<FolderData>, headers: Headers, conn: DbConn, nt: Notify) -> JsonResult {
     let data: FolderData = data.into_inner().data;
 
     let mut folder = match Folder::find_by_uuid(&uuid, &conn) {
         Some(folder) => folder,
-        _ => err!("Invalid folder")
+        _ => err!("Invalid folder"),
     };
 
     if folder.user_uuid != headers.user.uuid {
@@ -70,21 +87,22 @@ fn put_folder(uuid: String, data: JsonUpcase<FolderData>, headers: Headers, conn
|
|||||||
|
|
||||||
folder.name = data.Name;
|
folder.name = data.Name;
|
||||||
|
|
||||||
folder.save(&conn);
|
folder.save(&conn)?;
|
||||||
|
nt.send_folder_update(UpdateType::FolderUpdate, &folder);
|
||||||
|
|
||||||
Ok(Json(folder.to_json()))
|
Ok(Json(folder.to_json()))
|
||||||
}
|
}
|
||||||
|
|
||||||
#[post("/folders/<uuid>/delete")]
|
#[post("/folders/<uuid>/delete")]
|
||||||
fn delete_folder_post(uuid: String, headers: Headers, conn: DbConn) -> EmptyResult {
|
fn delete_folder_post(uuid: String, headers: Headers, conn: DbConn, nt: Notify) -> EmptyResult {
|
||||||
delete_folder(uuid, headers, conn)
|
delete_folder(uuid, headers, conn, nt)
|
||||||
}
|
}
|
||||||
|
|
||||||
#[delete("/folders/<uuid>")]
|
#[delete("/folders/<uuid>")]
|
||||||
fn delete_folder(uuid: String, headers: Headers, conn: DbConn) -> EmptyResult {
|
fn delete_folder(uuid: String, headers: Headers, conn: DbConn, nt: Notify) -> EmptyResult {
|
||||||
let folder = match Folder::find_by_uuid(&uuid, &conn) {
|
let folder = match Folder::find_by_uuid(&uuid, &conn) {
|
||||||
Some(folder) => folder,
|
Some(folder) => folder,
|
||||||
_ => err!("Invalid folder")
|
_ => err!("Invalid folder"),
|
||||||
};
|
};
|
||||||
|
|
||||||
if folder.user_uuid != headers.user.uuid {
|
if folder.user_uuid != headers.user.uuid {
|
||||||
@@ -92,8 +110,8 @@ fn delete_folder(uuid: String, headers: Headers, conn: DbConn) -> EmptyResult {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Delete the actual folder entry
|
// Delete the actual folder entry
|
||||||
match folder.delete(&conn) {
|
folder.delete(&conn)?;
|
||||||
Ok(()) => Ok(()),
|
|
||||||
Err(_) => err!("Failed deleting folder")
|
nt.send_folder_update(UpdateType::FolderDelete, &folder);
|
||||||
}
|
Ok(())
|
||||||
}
|
}
|
||||||
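Worth noting in this hunk: `folder.save(&conn)` used to discard its result, while the new code writes `folder.save(&conn)?` and sends a websocket notification afterwards. A minimal, self-contained sketch of that save-then-notify shape, using a plain `String` error in place of the crate's real `Error` and `Notify` types (all names here are illustrative, not the crate's exact definitions):

```rust
// Sketch of the save-then-notify pattern the new handlers use.
type EmptyResult = Result<(), String>;

struct Folder {
    name: String,
}

impl Folder {
    // Returning a Result lets callers bubble failures up with `?`
    // instead of silently ignoring them.
    fn save(&self) -> EmptyResult {
        if self.name.is_empty() {
            return Err("Failed saving folder".into());
        }
        Ok(())
    }
}

fn update_folder(mut folder: Folder, new_name: String) -> EmptyResult {
    folder.name = new_name;
    folder.save()?; // propagate DB errors to the API layer
    // nt.send_folder_update(UpdateType::FolderUpdate, &folder); // then notify
    Ok(())
}

fn main() {
    assert!(update_folder(Folder { name: "old".into() }, "new".into()).is_ok());
    assert!(update_folder(Folder { name: "old".into() }, String::new()).is_err());
}
```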
@@ -4,146 +4,69 @@ mod folders;
 mod organizations;
 pub(crate) mod two_factor;

-use self::accounts::*;
-use self::ciphers::*;
-use self::folders::*;
-use self::organizations::*;
-use self::two_factor::*;

 pub fn routes() -> Vec<Route> {
-    routes![
+    let mut mod_routes = routes![
-        register,
-        profile,
-        post_profile,
-        get_public_keys,
-        post_keys,
-        post_password,
-        post_sstamp,
-        post_email_token,
-        post_email,
-        delete_account,
-        revision_date,
-
-        sync,
-
-        get_ciphers,
-        get_cipher,
-        get_cipher_admin,
-        get_cipher_details,
-        post_ciphers,
-        post_ciphers_admin,
-        post_ciphers_import,
-        post_attachment,
-        delete_attachment_post,
-        delete_attachment,
-        post_cipher_admin,
-        post_cipher_share,
-        post_cipher,
-        put_cipher,
-        delete_cipher_post,
-        delete_cipher_post_admin,
-        delete_cipher,
-        delete_cipher_selected,
-        delete_all,
-        move_cipher_selected,
-
-        get_folders,
-        get_folder,
-        post_folders,
-        post_folder,
-        put_folder,
-        delete_folder_post,
-        delete_folder,
-
-        get_twofactor,
-        get_recover,
-        recover,
-        disable_twofactor,
-        generate_authenticator,
-        activate_authenticator,
-        generate_u2f,
-        activate_u2f,
-
-        get_organization,
-        create_organization,
-        delete_organization,
-        leave_organization,
-        get_user_collections,
-        get_org_collections,
-        get_org_collection_detail,
-        get_collection_users,
-        post_organization,
-        post_organization_collections,
-        post_organization_collection_delete_user,
-        post_organization_collection_update,
-        post_organization_collection_delete,
-        post_collections_update,
-        post_collections_admin,
-        get_org_details,
-        get_org_users,
-        send_invite,
-        confirm_invite,
-        get_user,
-        edit_user,
-        delete_user,
-
         clear_device_token,
         put_device_token,

         get_eq_domains,
         post_eq_domains,
+        put_eq_domains,
+        hibp_breach,
+    ];

-    ]
+    let mut routes = Vec::new();
+    routes.append(&mut accounts::routes());
+    routes.append(&mut ciphers::routes());
+    routes.append(&mut folders::routes());
+    routes.append(&mut organizations::routes());
+    routes.append(&mut two_factor::routes());
+    routes.append(&mut mod_routes);
+
+    routes
 }
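The hunk above replaces one monolithic `routes![...]` list with per-module `routes()` functions that the parent module concatenates. The mechanism is ordinary `Vec::append`, sketched here with strings standing in for `rocket::Route` values (the module names are illustrative):

```rust
// Each submodule exposes its own route list...
fn folders_routes() -> Vec<String> {
    vec!["get_folders".into(), "post_folders".into()]
}

fn ciphers_routes() -> Vec<String> {
    vec!["get_ciphers".into()]
}

// ...and the parent simply concatenates them.
fn routes() -> Vec<String> {
    let mut routes = Vec::new();
    routes.append(&mut folders_routes()); // append moves items, leaving the source empty
    routes.append(&mut ciphers_routes());
    routes
}

fn main() {
    assert_eq!(routes().len(), 3);
}
```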
-///
-/// Move this somewhere else
-///
+//
+// Move this somewhere else
+//

 use rocket::Route;

-use rocket_contrib::{Json, Value};
+use rocket_contrib::json::Json;
+use serde_json::Value;

-use db::DbConn;
-use db::models::*;
+use crate::api::{EmptyResult, JsonResult, JsonUpcase};
+use crate::auth::Headers;
+use crate::db::DbConn;
+use crate::error::Error;

-use api::{JsonResult, EmptyResult, JsonUpcase};
-use auth::Headers;
-
-#[put("/devices/identifier/<uuid>/clear-token", data = "<data>")]
-fn clear_device_token(uuid: String, data: Json<Value>, headers: Headers, conn: DbConn) -> EmptyResult {
-    let _data: Value = data.into_inner();
-
-    let device = match Device::find_by_uuid(&uuid, &conn) {
-        Some(device) => device,
-        None => err!("Device not found")
-    };
-
-    if device.user_uuid != headers.user.uuid {
-        err!("Device not owned by user")
-    }
-
-    device.delete(&conn);
-
+#[put("/devices/identifier/<uuid>/clear-token")]
+fn clear_device_token(uuid: String) -> EmptyResult {
+    // This endpoint doesn't have auth header
+
+    let _ = uuid;
+    // uuid is not related to deviceId
+
+    // This only clears push token
+    // https://github.com/bitwarden/core/blob/master/src/Api/Controllers/DevicesController.cs#L109
+    // https://github.com/bitwarden/core/blob/master/src/Core/Services/Implementations/DeviceService.cs#L37
     Ok(())
 }

 #[put("/devices/identifier/<uuid>/token", data = "<data>")]
-fn put_device_token(uuid: String, data: Json<Value>, headers: Headers, conn: DbConn) -> JsonResult {
-    let _data: Value = data.into_inner();
-
-    let device = match Device::find_by_uuid(&uuid, &conn) {
-        Some(device) => device,
-        None => err!("Device not found")
-    };
-
-    if device.user_uuid != headers.user.uuid {
-        err!("Device not owned by user")
-    }
-
-    // TODO: What does this do?
-
-    err!("Not implemented")
+fn put_device_token(uuid: String, data: JsonUpcase<Value>, headers: Headers) -> JsonResult {
+    let _data: Value = data.into_inner().data;
+    // Data has a single string value "PushToken"
+    let _ = uuid;
+    // uuid is not related to deviceId
+
+    // TODO: This should save the push token, but we don't have push functionality
+
+    Ok(Json(json!({
+        "Id": headers.device.uuid,
+        "Name": headers.device.name,
+        "Type": headers.device.atype,
+        "Identifier": headers.device.uuid,
+        "CreationDate": crate::util::format_date(&headers.device.created_at),
+    })))
 }

 #[derive(Serialize, Deserialize, Debug)]

@@ -154,7 +77,7 @@ struct GlobalDomain {
     Excluded: bool,
 }

-const GLOBAL_DOMAINS: &str = include_str!("global_domains.json");
+const GLOBAL_DOMAINS: &str = include_str!("../../static/global_domains.json");

 #[get("/settings/domains")]
 fn get_eq_domains(headers: Headers) -> JsonResult {

@@ -177,7 +100,6 @@ fn get_eq_domains(headers: Headers) -> JsonResult {
     })))
 }

-
 #[derive(Deserialize, Debug)]
 #[allow(non_snake_case)]
 struct EquivDomainData {

@@ -186,7 +108,7 @@ struct EquivDomainData {
 }

 #[post("/settings/domains", data = "<data>")]
-fn post_eq_domains(data: JsonUpcase<EquivDomainData>, headers: Headers, conn: DbConn) -> EmptyResult {
+fn post_eq_domains(data: JsonUpcase<EquivDomainData>, headers: Headers, conn: DbConn) -> JsonResult {
     let data: EquivDomainData = data.into_inner().data;

     let excluded_globals = data.ExcludedGlobalEquivalentDomains.unwrap_or_default();

@@ -195,10 +117,48 @@ fn post_eq_domains(data: JsonUpcase<EquivDomainData>, headers: Headers, conn: Db
     let mut user = headers.user;
     use serde_json::to_string;

-    user.excluded_globals = to_string(&excluded_globals).unwrap_or("[]".to_string());
-    user.equivalent_domains = to_string(&equivalent_domains).unwrap_or("[]".to_string());
+    user.excluded_globals = to_string(&excluded_globals).unwrap_or_else(|_| "[]".to_string());
+    user.equivalent_domains = to_string(&equivalent_domains).unwrap_or_else(|_| "[]".to_string());

-    user.save(&conn);
+    user.save(&conn)?;

-    Ok(())
+    Ok(Json(json!({})))
+}
+
+#[put("/settings/domains", data = "<data>")]
+fn put_eq_domains(data: JsonUpcase<EquivDomainData>, headers: Headers, conn: DbConn) -> JsonResult {
+    post_eq_domains(data, headers, conn)
+}
+
+#[get("/hibp/breach?<username>")]
+fn hibp_breach(username: String) -> JsonResult {
+    let user_agent = "Bitwarden_RS";
+    let url = format!(
+        "https://haveibeenpwned.com/api/v3/breachedaccount/{}?truncateResponse=false&includeUnverified=false",
+        username
+    );
+
+    use reqwest::{header::USER_AGENT, Client};
+
+    if let Some(api_key) = crate::CONFIG.hibp_api_key() {
+        let res = Client::new()
+            .get(&url)
+            .header(USER_AGENT, user_agent)
+            .header("hibp-api-key", api_key)
+            .send()?;
+
+        // If we get a 404, return a 404, it means no breached accounts
+        if res.status() == 404 {
+            return Err(Error::empty().with_code(404));
+        }
+
+        let value: Value = res.error_for_status()?.json()?;
+        Ok(Json(value))
+    } else {
+        Ok(Json(json!([{
+            "title": "--- Error! ---",
+            "description": "HaveIBeenPwned API key not set! Go to https://haveibeenpwned.com/API/Key",
+            "logopath": "/bwrs_images/error-x.svg"
+        }])))
+    }
 }
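One detail of the new `hibp_breach` handler that is easy to miss: HaveIBeenPwned answers 404 when an account has no recorded breaches, so the handler forwards the 404 to the client instead of treating it as a failure. A toy restatement of that status mapping (the helper and its shape are hypothetical, for illustration only):

```rust
// Map an HIBP response status the way the handler above does.
fn map_hibp_status(status: u16) -> Result<&'static str, u16> {
    match status {
        404 => Err(404),          // no breached accounts: pass the 404 through
        200 => Ok("breach list"), // the real handler parses the JSON body here
        other => Err(other),      // any other status is a genuine error
    }
}

fn main() {
    assert_eq!(map_hibp_status(404), Err(404));
    assert!(map_hibp_status(200).is_ok());
    assert_eq!(map_hibp_status(500), Err(500));
}
```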
(File diff suppressed because it is too large.)

src/api/icons.rs (366 changed lines)
@@ -1,25 +1,74 @@
+use std::fs::{create_dir_all, remove_file, symlink_metadata, File};
 use std::io::prelude::*;
-use std::fs::{create_dir_all, File};
+use std::time::{Duration, SystemTime};

-use rocket::Route;
-use rocket::response::Content;
 use rocket::http::ContentType;
+use rocket::response::Content;
+use rocket::Route;

-use reqwest;
+use reqwest::{header::HeaderMap, Client, Response};

-use CONFIG;
+use rocket::http::Cookie;
+
+use regex::Regex;
+use soup::prelude::*;
+
+use crate::error::Error;
+use crate::CONFIG;

 pub fn routes() -> Vec<Route> {
     routes![icon]
 }

+const FALLBACK_ICON: &[u8; 344] = include_bytes!("../static/fallback-icon.png");
+
+const ALLOWED_CHARS: &str = "_-.";
+
+lazy_static! {
+    // Reuse the client between requests
+    static ref CLIENT: Client = Client::builder()
+        .use_sys_proxy()
+        .gzip(true)
+        .timeout(Duration::from_secs(CONFIG.icon_download_timeout()))
+        .default_headers(_header_map())
+        .build()
+        .unwrap();
+}
+
+fn is_valid_domain(domain: &str) -> bool {
+    // Don't allow empty or too big domains or path traversal
+    if domain.is_empty() || domain.len() > 255 || domain.contains("..") {
+        return false;
+    }
+
+    // Only alphanumeric or specific characters
+    for c in domain.chars() {
+        if !c.is_alphanumeric() && !ALLOWED_CHARS.contains(c) {
+            return false;
+        }
+    }
+
+    true
+}
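`is_valid_domain` replaces the old ad-hoc check for `/` and `..`. A few hypothetical unit cases, with the function restated so the sketch compiles on its own (same logic as the diff, condensed into an iterator form):

```rust
const ALLOWED_CHARS: &str = "_-.";

// Restatement of the validity check introduced above.
fn is_valid_domain(domain: &str) -> bool {
    if domain.is_empty() || domain.len() > 255 || domain.contains("..") {
        return false;
    }
    domain.chars().all(|c| c.is_alphanumeric() || ALLOWED_CHARS.contains(c))
}

fn main() {
    assert!(is_valid_domain("example.com"));
    assert!(!is_valid_domain(""));                 // empty
    assert!(!is_valid_domain("../etc/passwd"));    // path traversal ("..")
    assert!(!is_valid_domain("example.com/icon")); // '/' is not an allowed character
}
```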
#[get("/<domain>/icon.png")]
|
#[get("/<domain>/icon.png")]
|
||||||
fn icon(domain: String) -> Content<Vec<u8>> {
|
fn icon(domain: String) -> Content<Vec<u8>> {
|
||||||
let icon_type = ContentType::new("image", "x-icon");
|
let icon_type = ContentType::new("image", "x-icon");
|
||||||
|
|
||||||
// Validate the domain to avoid directory traversal attacks
|
if !is_valid_domain(&domain) {
|
||||||
if domain.contains("/") || domain.contains("..") {
|
warn!("Invalid domain: {:#?}", domain);
|
||||||
return Content(icon_type, get_fallback_icon());
|
return Content(icon_type, FALLBACK_ICON.to_vec());
|
||||||
|
}
|
||||||
|
|
||||||
|
if let Some(blacklist) = CONFIG.icon_blacklist_regex() {
|
||||||
|
info!("Icon blacklist enabled: {:#?}", blacklist);
|
||||||
|
|
||||||
|
let regex = Regex::new(&blacklist).expect("Valid Regex");
|
||||||
|
|
||||||
|
if regex.is_match(&domain) {
|
||||||
|
warn!("Blacklisted domain: {:#?}", domain);
|
||||||
|
return Content(icon_type, FALLBACK_ICON.to_vec());
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
let icon = get_icon(&domain);
|
let icon = get_icon(&domain);
|
||||||
@@ -27,29 +76,42 @@ fn icon(domain: String) -> Content<Vec<u8>> {
|
|||||||
Content(icon_type, icon)
|
Content(icon_type, icon)
|
||||||
}
|
}
|
||||||
|
|
||||||
fn get_icon (domain: &str) -> Vec<u8> {
|
fn get_icon(domain: &str) -> Vec<u8> {
|
||||||
let path = format!("{}/{}.png", CONFIG.icon_cache_folder, domain);
|
let path = format!("{}/{}.png", CONFIG.icon_cache_folder(), domain);
|
||||||
|
|
||||||
if let Some(icon) = get_cached_icon(&path) {
|
if let Some(icon) = get_cached_icon(&path) {
|
||||||
return icon;
|
return icon;
|
||||||
}
|
}
|
||||||
|
|
||||||
let url = get_icon_url(&domain);
|
if CONFIG.disable_icon_download() {
|
||||||
|
return FALLBACK_ICON.to_vec();
|
||||||
|
}
|
||||||
|
|
||||||
// Get the icon, or fallback in case of error
|
// Get the icon, or fallback in case of error
|
||||||
match download_icon(&url) {
|
match download_icon(&domain) {
|
||||||
Ok(icon) => {
|
Ok(icon) => {
|
||||||
save_icon(&path, &icon);
|
save_icon(&path, &icon);
|
||||||
icon
|
icon
|
||||||
},
|
}
|
||||||
Err(e) => {
|
Err(e) => {
|
||||||
println!("Error downloading icon: {:?}", e);
|
error!("Error downloading icon: {:?}", e);
|
||||||
get_fallback_icon()
|
mark_negcache(&path);
|
||||||
|
FALLBACK_ICON.to_vec()
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
fn get_cached_icon(path: &str) -> Option<Vec<u8>> {
|
fn get_cached_icon(path: &str) -> Option<Vec<u8>> {
|
||||||
|
// Check for expiration of negatively cached copy
|
||||||
|
if icon_is_negcached(path) {
|
||||||
|
return Some(FALLBACK_ICON.to_vec());
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check for expiration of successfully cached copy
|
||||||
|
if icon_is_expired(path) {
|
||||||
|
return None;
|
||||||
|
}
|
||||||
|
|
||||||
// Try to read the cached icon, and return it if it exists
|
// Try to read the cached icon, and return it if it exists
|
||||||
if let Ok(mut f) = File::open(path) {
|
if let Ok(mut f) = File::open(path) {
|
||||||
let mut buffer = Vec::new();
|
let mut buffer = Vec::new();
|
||||||
@@ -62,51 +124,271 @@ fn get_cached_icon(path: &str) -> Option<Vec<u8>> {
|
|||||||
None
|
None
|
||||||
}
|
}
|
||||||
|
|
||||||
fn get_icon_url(domain: &str) -> String {
|
fn file_is_expired(path: &str, ttl: u64) -> Result<bool, Error> {
|
||||||
if CONFIG.local_icon_extractor {
|
let meta = symlink_metadata(path)?;
|
||||||
format!("http://{}/favicon.ico", domain)
|
let modified = meta.modified()?;
|
||||||
} else {
|
let age = SystemTime::now().duration_since(modified)?;
|
||||||
format!("https://icons.bitwarden.com/{}/icon.png", domain)
|
|
||||||
|
Ok(ttl > 0 && ttl <= age.as_secs())
|
||||||
|
}
|
||||||
|
|
||||||
|
fn icon_is_negcached(path: &str) -> bool {
|
||||||
|
let miss_indicator = path.to_owned() + ".miss";
|
||||||
|
let expired = file_is_expired(&miss_indicator, CONFIG.icon_cache_negttl());
|
||||||
|
|
||||||
|
match expired {
|
||||||
|
// No longer negatively cached, drop the marker
|
||||||
|
Ok(true) => {
|
||||||
|
if let Err(e) = remove_file(&miss_indicator) {
|
||||||
|
error!("Could not remove negative cache indicator for icon {:?}: {:?}", path, e);
|
||||||
|
}
|
||||||
|
false
|
||||||
|
}
|
||||||
|
// The marker hasn't expired yet.
|
||||||
|
Ok(false) => true,
|
||||||
|
// The marker is missing or inaccessible in some way.
|
||||||
|
Err(_) => false,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
fn download_icon(url: &str) -> Result<Vec<u8>, reqwest::Error> {
|
fn mark_negcache(path: &str) {
|
||||||
println!("Downloading icon for {}...", url);
|
let miss_indicator = path.to_owned() + ".miss";
|
||||||
let mut res = reqwest::get(url)?;
|
File::create(&miss_indicator).expect("Error creating negative cache marker");
|
||||||
|
}
|
||||||
|
|
||||||
res = res.error_for_status()?;
|
fn icon_is_expired(path: &str) -> bool {
|
||||||
|
let expired = file_is_expired(path, CONFIG.icon_cache_ttl());
|
||||||
|
expired.unwrap_or(true)
|
||||||
|
}
|
||||||
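The negative-cache machinery above boils down to a single age test: an entry is expired once it is at least `ttl` seconds old, and a `ttl` of `0` disables expiry entirely (note the `Ok(ttl > 0 && ttl <= age.as_secs())` in `file_is_expired`). Restated in isolation:

```rust
// The age check at the heart of file_is_expired, without the filesystem parts.
fn is_expired(age_secs: u64, ttl: u64) -> bool {
    ttl > 0 && ttl <= age_secs
}

fn main() {
    assert!(!is_expired(100, 0));   // ttl 0: cache entries never expire
    assert!(!is_expired(100, 200)); // younger than the ttl: still valid
    assert!(is_expired(300, 200));  // older than the ttl: expired
}
```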
-    let mut buffer: Vec<u8> = vec![];
-    res.copy_to(&mut buffer)?;
+#[derive(Debug)]
+struct Icon {
+    priority: u8,
+    href: String,
+}
+
+impl Icon {
+    fn new(priority: u8, href: String) -> Self {
+        Self { href, priority }
+    }
+}
+
+/// Returns a Result/Tuple which holds a Vector IconList and a string which holds the cookies from the last response.
+/// There will always be a result with a string which will contain https://example.com/favicon.ico and an empty string for the cookies.
+/// This does not mean that that location does exists, but it is the default location browser use.
+///
+/// # Argument
+/// * `domain` - A string which holds the domain with extension.
+///
+/// # Example
+/// let (mut iconlist, cookie_str) = get_icon_url("github.com")?;
+/// let (mut iconlist, cookie_str) = get_icon_url("gitlab.com")?;
+fn get_icon_url(domain: &str) -> Result<(Vec<Icon>, String), Error> {
+    // Default URL with secure and insecure schemes
+    let ssldomain = format!("https://{}", domain);
+    let httpdomain = format!("http://{}", domain);
+
+    // Create the iconlist
+    let mut iconlist: Vec<Icon> = Vec::new();
+
+    // Create the cookie_str to fill it all the cookies from the response
+    // These cookies can be used to request/download the favicon image.
+    // Some sites have extra security in place with for example XSRF Tokens.
+    let mut cookie_str = String::new();
+
+    let resp = get_page(&ssldomain).or_else(|_| get_page(&httpdomain));
+    if let Ok(content) = resp {
+        // Extract the URL from the respose in case redirects occured (like @ gitlab.com)
+        let url = content.url().clone();
+        let raw_cookies = content.headers().get_all("set-cookie");
+        cookie_str = raw_cookies
+            .iter()
+            .filter_map(|raw_cookie| raw_cookie.to_str().ok())
+            .map(|cookie_str| {
+                if let Ok(cookie) = Cookie::parse(cookie_str) {
+                    format!("{}={}; ", cookie.name(), cookie.value())
+                } else {
+                    String::new()
+                }
+            })
+            .collect::<String>();
+
+        // Add the default favicon.ico to the list with the domain the content responded from.
+        iconlist.push(Icon::new(35, url.join("/favicon.ico").unwrap().into_string()));
+
+        let soup = Soup::from_reader(content)?;
+        // Search for and filter
+        let favicons = soup
+            .tag("link")
+            .attr("rel", Regex::new(r"icon$|apple.*icon")?) // Only use icon rels
+            .attr("href", Regex::new(r"(?i)\w+\.(jpg|jpeg|png|ico)(\?.*)?$")?) // Only allow specific extensions
+            .find_all();
+
+        // Loop through all the found icons and determine it's priority
+        for favicon in favicons {
+            let sizes = favicon.get("sizes");
+            let href = favicon.get("href").expect("Missing href");
+            let full_href = url.join(&href).unwrap().into_string();
+
+            let priority = get_icon_priority(&full_href, sizes);
+
+            iconlist.push(Icon::new(priority, full_href))
+        }
+    } else {
+        // Add the default favicon.ico to the list with just the given domain
+        iconlist.push(Icon::new(35, format!("{}/favicon.ico", ssldomain)));
+    }
+
+    // Sort the iconlist by priority
+    iconlist.sort_by_key(|x| x.priority);
+
+    // There always is an icon in the list, so no need to check if it exists, and just return the first one
+    Ok((iconlist, cookie_str))
+}
+
+fn get_page(url: &str) -> Result<Response, Error> {
+    get_page_with_cookies(url, "")
+}
+
+fn get_page_with_cookies(url: &str, cookie_str: &str) -> Result<Response, Error> {
+    CLIENT
+        .get(url)
+        .header("cookie", cookie_str)
+        .send()?
+        .error_for_status()
+        .map_err(Into::into)
+}
+
+/// Returns a Integer with the priority of the type of the icon which to prefer.
+/// The lower the number the better.
+///
+/// # Arguments
+/// * `href` - A string which holds the href value or relative path.
+/// * `sizes` - The size of the icon if available as a <width>x<height> value like 32x32.
+///
+/// # Example
+/// priority1 = get_icon_priority("http://example.com/path/to/a/favicon.png", "32x32");
+/// priority2 = get_icon_priority("https://example.com/path/to/a/favicon.ico", "");
+fn get_icon_priority(href: &str, sizes: Option<String>) -> u8 {
+    // Check if there is a dimension set
+    let (width, height) = parse_sizes(sizes);
+
+    // Check if there is a size given
+    if width != 0 && height != 0 {
+        // Only allow square dimensions
+        if width == height {
+            // Change priority by given size
+            if width == 32 {
+                1
+            } else if width == 64 {
+                2
+            } else if width >= 24 && width <= 128 {
+                3
+            } else if width == 16 {
+                4
+            } else {
+                5
+            }
+        // There are dimensions available, but the image is not a square
+        } else {
+            200
+        }
+    } else {
+        // Change priority by file extension
+        if href.ends_with(".png") {
+            10
+        } else if href.ends_with(".jpg") || href.ends_with(".jpeg") {
+            20
+        } else {
+            30
+        }
+    }
+}
+
+/// Returns a Tuple with the width and hight as a seperate value extracted from the sizes attribute
+/// It will return 0 for both values if no match has been found.
+///
+/// # Arguments
+/// * `sizes` - The size of the icon if available as a <width>x<height> value like 32x32.
+///
+/// # Example
+/// let (width, height) = parse_sizes("64x64"); // (64, 64)
+/// let (width, height) = parse_sizes("x128x128"); // (128, 128)
+/// let (width, height) = parse_sizes("32"); // (0, 0)
+fn parse_sizes(sizes: Option<String>) -> (u16, u16) {
+    let mut width: u16 = 0;
+    let mut height: u16 = 0;
+
+    if let Some(sizes) = sizes {
+        match Regex::new(r"(?x)(\d+)\D*(\d+)").unwrap().captures(sizes.trim()) {
+            None => {}
+            Some(dimensions) => {
+                if dimensions.len() >= 3 {
+                    width = dimensions[1].parse::<u16>().unwrap_or_default();
+                    height = dimensions[2].parse::<u16>().unwrap_or_default();
+                }
+            }
+        }
+    }
+
+    (width, height)
+}
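To make the priority ordering concrete (lower number wins), here is the same decision tree as `get_icon_priority`, restated with the dimensions already parsed. This is an illustrative sketch, not the crate's code verbatim; note the arm order matters, since `24..=128` also covers 32 and 64:

```rust
// Same ordering as get_icon_priority above, dimensions pre-parsed.
fn priority_for(width: u16, height: u16, href: &str) -> u8 {
    if width != 0 && height != 0 {
        if width == height {
            match width {
                32 => 1,        // ideal size
                64 => 2,
                24..=128 => 3,  // other reasonable square sizes
                16 => 4,
                _ => 5,
            }
        } else {
            200 // dimensions given, but not square
        }
    } else if href.ends_with(".png") {
        10 // no size info: decide by extension
    } else if href.ends_with(".jpg") || href.ends_with(".jpeg") {
        20
    } else {
        30
    }
}

fn main() {
    assert_eq!(priority_for(32, 32, "favicon.png"), 1);
    assert_eq!(priority_for(96, 48, "favicon.png"), 200); // not square
    assert_eq!(priority_for(0, 0, "favicon.ico"), 30);    // extension fallback
}
```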
+fn download_icon(domain: &str) -> Result<Vec<u8>, Error> {
+    let (iconlist, cookie_str) = get_icon_url(&domain)?;
+
+    let mut buffer = Vec::new();
+
+    for icon in iconlist.iter().take(5) {
+        match get_page_with_cookies(&icon.href, &cookie_str) {
+            Ok(mut res) => {
+                info!("Downloaded icon from {}", icon.href);
+                res.copy_to(&mut buffer)?;
+                break;
+            }
+            Err(_) => info!("Download failed for {}", icon.href),
+        };
+    }
+
+    if buffer.is_empty() {
+        err!("Empty response")
+    }
+
     Ok(buffer)
 }

 fn save_icon(path: &str, icon: &[u8]) {
-    create_dir_all(&CONFIG.icon_cache_folder).expect("Error creating icon cache");
+    create_dir_all(&CONFIG.icon_cache_folder()).expect("Error creating icon cache");

     if let Ok(mut f) = File::create(path) {
         f.write_all(icon).expect("Error writing icon file");
     };
 }

-const FALLBACK_ICON_URL: &str = "https://raw.githubusercontent.com/bitwarden/web/master/src/images/fa-globe.png";
-
-fn get_fallback_icon() -> Vec<u8> {
-    let path = format!("{}/default.png", CONFIG.icon_cache_folder);
-
-    if let Some(icon) = get_cached_icon(&path) {
-        return icon;
-    }
-
-    match download_icon(FALLBACK_ICON_URL) {
-        Ok(icon) => {
-            save_icon(&path, &icon);
-            icon
-        },
-        Err(e) => {
-            println!("Error downloading fallback icon: {:?}", e);
-            vec![]
-        }
-    }
-}
+fn _header_map() -> HeaderMap {
+    // Set some default headers for the request.
+    // Use a browser like user-agent to make sure most websites will return there correct website.
+    use reqwest::header::*;
+
+    macro_rules! headers {
+        ($( $name:ident : $value:literal),+ $(,)? ) => {
+            let mut headers = HeaderMap::new();
+            $( headers.insert($name, HeaderValue::from_static($value)); )+
+            headers
+        };
+    }
+
+    headers! {
+        USER_AGENT: "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36 Edge/16.16299",
+        ACCEPT_LANGUAGE: "en-US,en;q=0.8",
+        CACHE_CONTROL: "no-cache",
+        PRAGMA: "no-cache",
+        ACCEPT: "text/html,application/xhtml+xml,application/xml; q=0.9,image/webp,image/apng,*/*;q=0.8",
+    }
 }
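The `headers!` macro above is just shorthand for a series of `insert` calls on a header map. The same macro shape with plain std types, so it can be compiled and tested standalone (a `HashMap` stands in for reqwest's `HeaderMap`, and the header names are illustrative):

```rust
use std::collections::HashMap;

// Declarative macro expanding to a map populated with insert calls,
// mirroring the shape of the headers! macro in the diff.
macro_rules! headers {
    ($( $name:literal : $value:literal ),+ $(,)?) => {{
        let mut headers = HashMap::new();
        $( headers.insert($name, $value); )+
        headers
    }};
}

fn main() {
    let h = headers! {
        "user-agent": "Mozilla/5.0 (sketch)",
        "cache-control": "no-cache",
    };
    assert_eq!(h["cache-control"], "no-cache");
    assert_eq!(h.len(), 2);
}
```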
@@ -1,41 +1,59 @@
-use std::collections::HashMap;
-
-use rocket::request::{self, Form, FormItems, FromForm, FromRequest, Request};
-use rocket::{Outcome, Route};
-
-use rocket_contrib::{Json, Value};
+use rocket::request::{Form, FormItems, FromForm};
+use rocket::Route;
+
+use rocket_contrib::json::Json;
+use serde_json::Value;

 use num_traits::FromPrimitive;

-use db::models::*;
-use db::DbConn;
+use crate::db::models::*;
+use crate::db::DbConn;

-use util::{self, JsonMap};
+use crate::util;

-use api::{ApiResult, JsonResult};
+use crate::api::{ApiResult, EmptyResult, JsonResult};

-use CONFIG;
+use crate::auth::ClientIp;
+
+use crate::mail;
+
+use crate::CONFIG;

 pub fn routes() -> Vec<Route> {
     routes![login]
 }

-#[post("/connect/token", data = "<connect_data>")]
-fn login(connect_data: Form<ConnectData>, device_type: DeviceType, conn: DbConn) -> JsonResult {
-    let data = connect_data.get();
+#[post("/connect/token", data = "<data>")]
+fn login(data: Form<ConnectData>, conn: DbConn, ip: ClientIp) -> JsonResult {
+    let data: ConnectData = data.into_inner();

-    match data.grant_type {
-        GrantType::RefreshToken => _refresh_login(data, device_type, conn),
-        GrantType::Password => _password_login(data, device_type, conn),
+    match data.grant_type.as_ref() {
+        "refresh_token" => {
+            _check_is_some(&data.refresh_token, "refresh_token cannot be blank")?;
+            _refresh_login(data, conn)
+        }
+        "password" => {
+            _check_is_some(&data.client_id, "client_id cannot be blank")?;
+            _check_is_some(&data.password, "password cannot be blank")?;
+            _check_is_some(&data.scope, "scope cannot be blank")?;
+            _check_is_some(&data.username, "username cannot be blank")?;
+
+            _check_is_some(&data.device_identifier, "device_identifier cannot be blank")?;
+            _check_is_some(&data.device_name, "device_name cannot be blank")?;
+            _check_is_some(&data.device_type, "device_type cannot be blank")?;
+
+            _password_login(data, conn, ip)
+        }
+        t => err!("Invalid type", t),
     }
 }
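The rewritten `login` dispatches on the raw `grant_type` string and validates the required fields up front, so a missing field produces a targeted error message rather than a panic on a later `unwrap`. A minimal sketch of that dispatch-and-validate shape, with simplified signatures and `String` errors standing in for the crate's types:

```rust
// Validate that a required form field is present, naming it on failure.
fn check_is_some<T>(value: &Option<T>, msg: &str) -> Result<(), String> {
    if value.is_none() {
        return Err(msg.to_string());
    }
    Ok(())
}

fn dispatch(grant_type: &str, refresh_token: Option<String>) -> Result<(), String> {
    match grant_type {
        "refresh_token" => {
            check_is_some(&refresh_token, "refresh_token cannot be blank")?;
            Ok(()) // _refresh_login(...) in the real handler
        }
        "password" => Ok(()), // field checks + _password_login(...) elided
        t => Err(format!("Invalid type {}", t)),
    }
}

fn main() {
    assert!(dispatch("refresh_token", Some("abc".into())).is_ok());
    assert_eq!(
        dispatch("refresh_token", None).unwrap_err(),
        "refresh_token cannot be blank"
    );
}
```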
-fn _refresh_login(data: &ConnectData, _device_type: DeviceType, conn: DbConn) -> JsonResult {
+fn _refresh_login(data: ConnectData, conn: DbConn) -> JsonResult {
     // Extract token
-    let token = data.get("refresh_token");
+    let token = data.refresh_token.unwrap();

     // Get device by refresh token
-    let mut device = match Device::find_by_refresh_token(token, &conn) {
+    let mut device = match Device::find_by_refresh_token(&token, &conn) {
         Some(device) => device,
         None => err!("Invalid refresh token"),
     };

@@ -45,156 +63,158 @@ fn _refresh_login(data: &ConnectData, _device_type: DeviceType, conn: DbConn) ->
     let orgs = UserOrganization::find_by_user(&user.uuid, &conn);

     let (access_token, expires_in) = device.refresh_tokens(&user, orgs);
-    device.save(&conn);
+    device.save(&conn)?;

     Ok(Json(json!({
         "access_token": access_token,
         "expires_in": expires_in,
         "token_type": "Bearer",
         "refresh_token": device.refresh_token,
-        "Key": user.key,
+        "Key": user.akey,
         "PrivateKey": user.private_key,
     })))
 }

-fn _password_login(data: &ConnectData, device_type: DeviceType, conn: DbConn) -> JsonResult {
+fn _password_login(data: ConnectData, conn: DbConn, ip: ClientIp) -> JsonResult {
     // Validate scope
-    let scope = data.get("scope");
+    let scope = data.scope.as_ref().unwrap();
     if scope != "api offline_access" {
         err!("Scope not supported")
     }

     // Get the user
-    let username = data.get("username");
+    let username = data.username.as_ref().unwrap();
     let user = match User::find_by_mail(username, &conn) {
         Some(user) => user,
-        None => err!("Username or password is incorrect. Try again."),
+        None => err!(
+            "Username or password is incorrect. Try again",
+            format!("IP: {}. Username: {}.", ip.ip, username)
+        ),
     };

     // Check password
-    let password = data.get("password");
+    let password = data.password.as_ref().unwrap();
     if !user.check_valid_password(password) {
-        err!("Username or password is incorrect. Try again.")
+        err!(
+            "Username or password is incorrect. Try again",
+            format!("IP: {}. Username: {}.", ip.ip, username)
+        )
     }

-    // Let's only use the header and ignore the 'devicetype' parameter
-    let device_type_num = device_type.0;
-
-    let (device_id, device_name) = if data.is_device {
-        (
-            data.get("deviceidentifier").clone(),
-            data.get("devicename").clone(),
-        )
-    } else {
-        (format!("web-{}", user.uuid), String::from("web"))
-    };
-
-    // Find device or create new
-    let mut device = match Device::find_by_uuid(&device_id, &conn) {
-        Some(device) => {
-            // Check if valid device
-            if device.user_uuid != user.uuid {
-                device.delete(&conn);
-                err!("Device is not owned by user")
-            }
-
-            device
-        }
-        None => {
-            // Create new device
-            Device::new(device_id, user.uuid.clone(), device_name, device_type_num)
-        }
-    };
+    let (mut device, new_device) = get_device(&data, &conn, &user);

     let twofactor_token = twofactor_auth(&user.uuid, &data, &mut device, &conn)?;

+    if CONFIG.mail_enabled() && new_device {
+        if let Err(e) = mail::send_new_device_logged_in(&user.email, &ip.ip.to_string(), &device.updated_at, &device.name) {
+            error!("Error sending new device email: {:#?}", e);
+
+            if CONFIG.require_device_email() {
+                err!("Could not send login notification email. Please contact your administrator.")
+            }
+        }
+    }
+
     // Common
     let user = User::find_by_uuid(&device.user_uuid, &conn).unwrap();
     let orgs = UserOrganization::find_by_user(&user.uuid, &conn);

     let (access_token, expires_in) = device.refresh_tokens(&user, orgs);
-    device.save(&conn);
+    device.save(&conn)?;

     let mut result = json!({
         "access_token": access_token,
         "expires_in": expires_in,
         "token_type": "Bearer",
         "refresh_token": device.refresh_token,
-        "Key": user.key,
+        "Key": user.akey,
         "PrivateKey": user.private_key,
-        //"TwoFactorToken": "11122233333444555666777888999"
     });

     if let Some(token) = twofactor_token {
         result["TwoFactorToken"] = Value::String(token);
     }

+    info!("User {} logged in successfully. IP: {}", username, ip.ip);
     Ok(Json(result))
 }

+/// Retrieves an existing device or creates a new device from ConnectData and the User
+fn get_device(data: &ConnectData, conn: &DbConn, user: &User) -> (Device, bool) {
+    // On iOS, device_type sends "iOS", on others it sends a number
+    let device_type = util::try_parse_string(data.device_type.as_ref()).unwrap_or(0);
+    let device_id = data.device_identifier.clone().expect("No device id provided");
+    let device_name = data.device_name.clone().expect("No device name provided");
+
+    let mut new_device = false;
+    // Find device or create new
+    let device = match Device::find_by_uuid(&device_id, &conn) {
+        Some(device) => {
+            // Check if owned device, and recreate if not
+            if device.user_uuid != user.uuid {
+                info!("Device exists but is owned by another user. The old device will be discarded");
+                new_device = true;
+                Device::new(device_id, user.uuid.clone(), device_name, device_type)
+            } else {
+                device
+            }
+        }
+        None => {
+            new_device = true;
+            Device::new(device_id, user.uuid.clone(), device_name, device_type)
+        }
+    };
+
+    (device, new_device)
+}
+
 fn twofactor_auth(
     user_uuid: &str,
     data: &ConnectData,
     device: &mut Device,
     conn: &DbConn,
 ) -> ApiResult<Option<String>> {
-    let twofactors_raw = TwoFactor::find_by_user(user_uuid, conn);
-    // Remove u2f challenge twofactors (impl detail)
-    let twofactors: Vec<_> = twofactors_raw.iter().filter(|tf| tf.type_ < 1000).collect();
-
-    let providers: Vec<_> = twofactors.iter().map(|tf| tf.type_).collect();
+    let twofactors = TwoFactor::find_by_user(user_uuid, conn);

     // No twofactor token if twofactor is disabled
-    if twofactors.len() == 0 {
+    if twofactors.is_empty() {
         return Ok(None);
     }

-    let provider = match util::parse_option_string(data.get_opt("twoFactorProvider")) {
-        Some(provider) => provider,
-        None => providers[0], // If we aren't given a two factor provider, asume the first one
-    };
+    let twofactor_ids: Vec<_> = twofactors.iter().map(|tf| tf.atype).collect();
+    let selected_id = data.two_factor_provider.unwrap_or(twofactor_ids[0]); // If we aren't given a two factor provider, asume the first one

-    let twofactor_code = match data.get_opt("twoFactorToken") {
-        Some(code) => code,
-        None => err_json!(_json_err_twofactor(&providers, user_uuid, conn)?),
+    let twofactor_code = match data.two_factor_token {
+        Some(ref code) => code,
+        None => err_json!(_json_err_twofactor(&twofactor_ids, user_uuid, conn)?),
     };

-    let twofactor = twofactors.iter().filter(|tf| tf.type_ == provider).nth(0);
+    let selected_twofactor = twofactors.into_iter().filter(|tf| tf.atype == selected_id).nth(0);

-    match TwoFactorType::from_i32(provider) {
+    use crate::api::core::two_factor as _tf;
+    use crate::crypto::ct_eq;
+
+    let selected_data = _selected_data(selected_twofactor);
+    let mut remember = data.two_factor_remember.unwrap_or(0);
+
+    match TwoFactorType::from_i32(selected_id) {
+        Some(TwoFactorType::Authenticator) => _tf::validate_totp_code_str(twofactor_code, &selected_data?)?,
+        Some(TwoFactorType::U2f) => _tf::validate_u2f_login(user_uuid, twofactor_code, conn)?,
+        Some(TwoFactorType::YubiKey) => _tf::validate_yubikey_login(twofactor_code, &selected_data?)?,
+        Some(TwoFactorType::Duo) => _tf::validate_duo_login(data.username.as_ref().unwrap(), twofactor_code, conn)?,
+
         Some(TwoFactorType::Remember) => {
-            match &device.twofactor_remember {
-                Some(remember) if remember == twofactor_code => return Ok(None), // No twofactor token needed here
-                _ => err_json!(_json_err_twofactor(&providers, user_uuid, conn)?),
+            match device.twofactor_remember {
+                Some(ref code) if !CONFIG.disable_2fa_remember() && ct_eq(code, twofactor_code) => {
+                    remember = 1; // Make sure we also return the token here, otherwise it will only remember the first time
+                }
+                _ => err_json!(_json_err_twofactor(&twofactor_ids, user_uuid, conn)?),
             }
         }

-        Some(TwoFactorType::Authenticator) => {
-            let twofactor = match twofactor {
-                Some(tf) => tf,
-                None => err!("TOTP not enabled"),
-            };
-
-            let totp_code: u64 = match twofactor_code.parse() {
-                Ok(code) => code,
-                _ => err!("Invalid TOTP code"),
-            };
-
-            if !twofactor.check_totp_code(totp_code) {
-                err_json!(_json_err_twofactor(&providers, user_uuid, conn)?)
-            }
-        }
-
-        Some(TwoFactorType::U2f) => {
-            use api::core::two_factor;
-
-            two_factor::validate_u2f_login(user_uuid, twofactor_code, conn)?;
-        }
-
         _ => err!("Invalid two factor provider"),
     }

-    if util::parse_option_string(data.get_opt("twoFactorRemember")).unwrap_or(0) == 1 {
+    if !CONFIG.disable_2fa_remember() && remember == 1 {
         Ok(Some(device.refresh_twofactor_remember()))
     } else {
         device.delete_twofactor_remember();

@@ -202,8 +222,15 @@ fn twofactor_auth(
     }
 }

+fn _selected_data(tf: Option<TwoFactor>) -> ApiResult<String> {
+    match tf {
+        Some(tf) => Ok(tf.data),
+        None => err!("Two factor doesn't exist"),
+    }
+}
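A security-relevant detail in `twofactor_auth`: the remembered 2FA token is now compared with `crypto::ct_eq` rather than `==`, so the comparison takes the same time whether the guess matches no bytes or all but the last one. An illustrative constant-time comparison; the crate's actual implementation may differ:

```rust
// Constant-time byte-slice equality: XOR each byte pair and OR the results,
// so every byte is always inspected regardless of where a mismatch occurs.
fn ct_eq(a: &[u8], b: &[u8]) -> bool {
    if a.len() != b.len() {
        return false;
    }
    a.iter().zip(b).fold(0u8, |acc, (x, y)| acc | (x ^ y)) == 0
}

fn main() {
    assert!(ct_eq(b"remember-token", b"remember-token"));
    assert!(!ct_eq(b"remember-token", b"remember-tokeX"));
    assert!(!ct_eq(b"short", b"longer-input"));
}
```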
 fn _json_err_twofactor(providers: &[i32], user_uuid: &str, conn: &DbConn) -> ApiResult<Value> {
-    use api::core::two_factor;
+    use crate::api::core::two_factor;

     let mut result = json!({
         "error" : "invalid_grant",

@@ -218,31 +245,51 @@ fn _json_err_twofactor(providers: &[i32], user_uuid: &str, conn: &DbConn) -> Api
         match TwoFactorType::from_i32(*provider) {
             Some(TwoFactorType::Authenticator) => { /* Nothing to do for TOTP */ }

-            Some(TwoFactorType::U2f) if CONFIG.domain_set => {
+            Some(TwoFactorType::U2f) if CONFIG.domain_set() => {
                 let request = two_factor::generate_u2f_login(user_uuid, conn)?;
                 let mut challenge_list = Vec::new();

                 for key in request.registered_keys {
-                    let mut challenge_map = JsonMap::new();
-
-                    challenge_map.insert("appId".into(), Value::String(request.app_id.clone()));
-                    challenge_map
-                        .insert("challenge".into(), Value::String(request.challenge.clone()));
-                    challenge_map.insert("version".into(), Value::String(key.version));
-                    challenge_map.insert(
-                        "keyHandle".into(),
-                        Value::String(key.key_handle.unwrap_or_default()),
-                    );
-
-                    challenge_list.push(Value::Object(challenge_map));
+                    challenge_list.push(json!({
+                        "appId": request.app_id,
+                        "challenge": request.challenge,
+                        "version": key.version,
+                        "keyHandle": key.key_handle,
+                    }));
                 }

-                let mut map = JsonMap::new();
-                use serde_json;
                 let challenge_list_str = serde_json::to_string(&challenge_list).unwrap();

-                map.insert("Challenges".into(), Value::String(challenge_list_str));
-                result["TwoFactorProviders2"][provider.to_string()] = Value::Object(map);
+                result["TwoFactorProviders2"][provider.to_string()] = json!({
+                    "Challenges": challenge_list_str,
+                });
+            }
+
+            Some(TwoFactorType::Duo) => {
+                let email = match User::find_by_uuid(user_uuid, &conn) {
+                    Some(u) => u.email,
+                    None => err!("User does not exist"),
+                };
+
+                let (signature, host) = two_factor::generate_duo_signature(&email, conn)?;
+
+                result["TwoFactorProviders2"][provider.to_string()] = json!({
+                    "Host": host,
+                    "Signature": signature,
+                });
+            }
+
+            Some(tf_type @ TwoFactorType::YubiKey) => {
+                let twofactor = match TwoFactor::find_by_user_and_type(user_uuid, tf_type as i32, &conn) {
+                    Some(tf) => tf,
+                    None => err!("No YubiKey devices registered"),
+                };
+
+                let yubikey_metadata: two_factor::YubikeyMetadata = serde_json::from_str(&twofactor.data)?;
+
+                result["TwoFactorProviders2"][provider.to_string()] = json!({
+                    "Nfc": yubikey_metadata.Nfc,
+                })
             }

             _ => {}

@@ -252,93 +299,64 @@ fn _json_err_twofactor(providers: &[i32], user_uuid: &str, conn: &DbConn) -> Api
     Ok(result)
 }

-#[derive(Clone, Copy)]
-struct DeviceType(i32);
-
-impl<'a, 'r> FromRequest<'a, 'r> for DeviceType {
-    type Error = &'static str;
-
-    fn from_request(request: &'a Request<'r>) -> request::Outcome<Self, Self::Error> {
-        let headers = request.headers();
-        let type_opt = headers.get_one("Device-Type");
-        let type_num = util::parse_option_string(type_opt).unwrap_or(0);
-
-        Outcome::Success(DeviceType(type_num))
-    }
-}
-
-#[derive(Debug)]
+#[derive(Debug, Clone, Default)]
+#[allow(non_snake_case)]
 struct ConnectData {
-    grant_type: GrantType,
-    is_device: bool,
-    data: HashMap<String, String>,
+    grant_type: String, // refresh_token, password
+
+    // Needed for grant_type="refresh_token"
+    refresh_token: Option<String>,
+
+    // Needed for grant_type="password"
+    client_id: Option<String>, // web, cli, desktop, browser, mobile
+    password: Option<String>,
+    scope: Option<String>,
+    username: Option<String>,
+
+    device_identifier: Option<String>,
+    device_name: Option<String>,
+    device_type: Option<String>,
+
+    // Needed for two-factor auth
+    two_factor_provider: Option<i32>,
+    two_factor_token: Option<String>,
+    two_factor_remember: Option<i32>,
 }

-#[derive(Debug, Copy, Clone)]
-enum GrantType {
-    RefreshToken,
-    Password,
-}
-
-impl ConnectData {
-    fn get(&self, key: &str) -> &String {
-        &self.data[&key.to_lowercase()]
-    }
-
-    fn get_opt(&self, key: &str) -> Option<&String> {
-        self.data.get(&key.to_lowercase())
-    }
-}
-
-const VALUES_REFRESH: [&str; 1] = ["refresh_token"];
-const VALUES_PASSWORD: [&str; 5] = ["client_id", "grant_type", "password", "scope", "username"];
-const VALUES_DEVICE: [&str; 3] = ["deviceidentifier", "devicename", "devicetype"];
-
 impl<'f> FromForm<'f> for ConnectData {
     type Error = String;

     fn from_form(items: &mut FormItems<'f>, _strict: bool) -> Result<Self, Self::Error> {
-        let mut data = HashMap::new();
-
-        // Insert data into map
-        for (key, value) in items {
-            match (key.url_decode(), value.url_decode()) {
-                (Ok(key), Ok(value)) => data.insert(key.to_lowercase(), value),
-                _ => return Err("Error decoding key or value".to_string()),
-            };
-        }
-
-        // Validate needed values
-        let (grant_type, is_device) = match data.get("grant_type").map(String::as_ref) {
-            Some("refresh_token") => {
-                check_values(&data, &VALUES_REFRESH)?;
-                (GrantType::RefreshToken, false) // Device doesn't matter here
-            }
-            Some("password") => {
-                check_values(&data, &VALUES_PASSWORD)?;
-
-                let is_device = match data["client_id"].as_ref() {
-                    "browser" | "mobile" => check_values(&data, &VALUES_DEVICE)?,
-                    _ => false,
-                };
-                (GrantType::Password, is_device)
-            }
-            _ => return Err("Grant type not supported".to_string()),
-        };
-
-        Ok(ConnectData {
-            grant_type,
-            is_device,
-            data,
-        })
+        let mut form = Self::default();
+        for item in items {
+            let (key, value) = item.key_value_decoded();
+            let mut normalized_key = key.to_lowercase();
+            normalized_key.retain(|c| c != '_'); // Remove '_'
+
+            match normalized_key.as_ref() {
+                "granttype" => form.grant_type = value,
+                "refreshtoken" => form.refresh_token = Some(value),
+                "clientid" => form.client_id = Some(value),
+                "password" => form.password = Some(value),
+                "scope" => form.scope = Some(value),
+                "username" => form.username = Some(value),
+                "deviceidentifier" => form.device_identifier = Some(value),
+                "devicename" => form.device_name = Some(value),
+                "devicetype" => form.device_type = Some(value),
+                "twofactorprovider" => form.two_factor_provider = value.parse().ok(),
+                "twofactortoken" => form.two_factor_token = Some(value),
+                "twofactorremember" => form.two_factor_remember = value.parse().ok(),
+                key => warn!("Detected unexpected parameter during login: {}", key),
+            }
+        }
+
+        Ok(form)
     }
 }

-fn check_values(map: &HashMap<String, String>, values: &[&str]) -> Result<bool, String> {
-    for value in values {
-        if !map.contains_key(*value) {
-            return Err(format!("{} cannot be blank", value));
-        }
-    }
-    Ok(true)
-}
+fn _check_is_some<T>(value: &Option<T>, msg: &str) -> EmptyResult {
+    if value.is_none() {
+        err!(msg)
+    }
+    Ok(())
+}
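Note how `from_form` accepts both `grant_type` and `grantType` style keys: it lowercases each key and strips underscores before matching. That normalization in isolation, as a self-contained sketch:

```rust
// Normalize a form key the way the FromForm impl above does:
// lowercase, then drop every underscore.
fn normalize_key(key: &str) -> String {
    let mut normalized = key.to_lowercase();
    normalized.retain(|c| c != '_');
    normalized
}

fn main() {
    assert_eq!(normalize_key("grant_type"), "granttype");
    assert_eq!(normalize_key("deviceIdentifier"), "deviceidentifier");
    assert_eq!(normalize_key("twoFactorToken"), "twofactortoken");
}
```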
@@ -1,29 +1,35 @@
+mod admin;
 pub(crate) mod core;
 mod icons;
 mod identity;
+mod notifications;
 mod web;

+pub use self::admin::routes as admin_routes;
 pub use self::core::routes as core_routes;
 pub use self::icons::routes as icons_routes;
 pub use self::identity::routes as identity_routes;
+pub use self::notifications::routes as notifications_routes;
+pub use self::notifications::{start_notification_server, Notify, UpdateType};
 pub use self::web::routes as web_routes;

-use rocket::response::status::BadRequest;
-use rocket_contrib::Json;
+use rocket_contrib::json::Json;
+use serde_json::Value;

 // Type aliases for API methods results
-type ApiResult<T> = Result<T, BadRequest<Json>>;
-type JsonResult = ApiResult<Json>;
-type EmptyResult = ApiResult<()>;
+type ApiResult<T> = Result<T, crate::error::Error>;
+pub type JsonResult = ApiResult<Json<Value>>;
+pub type EmptyResult = ApiResult<()>;

-use util;
+use crate::util;
 type JsonUpcase<T> = Json<util::UpCase<T>>;
+type JsonUpcaseVec<T> = Json<Vec<util::UpCase<T>>>;

 // Common structs representing JSON data received
 #[derive(Deserialize)]
 #[allow(non_snake_case)]
 struct PasswordData {
-    MasterPasswordHash: String
+    MasterPasswordHash: String,
 }

 #[derive(Deserialize, Debug, Clone)]

@@ -37,14 +43,17 @@ impl NumberOrString {
     fn into_string(self) -> String {
         match self {
             NumberOrString::Number(n) => n.to_string(),
-            NumberOrString::String(s) => s
+            NumberOrString::String(s) => s,
         }
     }

-    fn into_i32(self) -> Option<i32> {
+    fn into_i32(self) -> ApiResult<i32> {
+        use std::num::ParseIntError as PIE;
         match self {
-            NumberOrString::Number(n) => Some(n),
-            NumberOrString::String(s) => s.parse().ok()
+            NumberOrString::Number(n) => Ok(n),
+            NumberOrString::String(s) => s
+                .parse()
+                .map_err(|e: PIE| crate::Error::new("Can't convert to number", e.to_string())),
         }
     }
 }
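The Bitwarden clients send some numeric fields either as a JSON number or as a string; `into_i32` above now surfaces a real error instead of an `Option`, which callers can bubble up with `?`. A self-contained sketch of the same behaviour, with a `String` error in place of the crate's `Error`:

```rust
enum NumberOrString {
    Number(i32),
    String(String),
}

impl NumberOrString {
    // Parse failures become a descriptive error rather than a silent None.
    fn into_i32(self) -> Result<i32, String> {
        match self {
            NumberOrString::Number(n) => Ok(n),
            NumberOrString::String(s) => s
                .parse()
                .map_err(|e| format!("Can't convert to number: {}", e)),
        }
    }
}

fn main() {
    assert_eq!(NumberOrString::Number(5).into_i32(), Ok(5));
    assert_eq!(NumberOrString::String("7".into()).into_i32(), Ok(7));
    assert!(NumberOrString::String("abc".into()).into_i32().is_err());
}
```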
src/api/notifications.rs (new file, 364 lines)
@@ -0,0 +1,364 @@
use rocket::Route;
use rocket_contrib::json::Json;
use serde_json::Value as JsonValue;

use crate::api::JsonResult;
use crate::auth::Headers;
use crate::db::DbConn;

use crate::CONFIG;

pub fn routes() -> Vec<Route> {
    routes![negotiate, websockets_err]
}

#[get("/hub")]
fn websockets_err() -> JsonResult {
    err!("'/notifications/hub' should be proxied to the websocket server or notifications won't work. Go to the README for more info.")
}

#[post("/hub/negotiate")]
fn negotiate(_headers: Headers, _conn: DbConn) -> JsonResult {
    use crate::crypto;
    use data_encoding::BASE64URL;

    let conn_id = BASE64URL.encode(&crypto::get_random(vec![0u8; 16]));
    let mut available_transports: Vec<JsonValue> = Vec::new();

    if CONFIG.websocket_enabled() {
        available_transports.push(json!({"transport":"WebSockets", "transferFormats":["Text","Binary"]}));
    }

    // TODO: Implement transports
    // Rocket WS support: https://github.com/SergioBenitez/Rocket/issues/90
    // Rocket SSE support: https://github.com/SergioBenitez/Rocket/issues/33
    // {"transport":"ServerSentEvents", "transferFormats":["Text"]},
    // {"transport":"LongPolling", "transferFormats":["Text","Binary"]}
    Ok(Json(json!({
        "connectionId": conn_id,
        "availableTransports": available_transports
    })))
}
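For reference, a negotiate response with websockets enabled has this shape; the connectionId below is an invented placeholder, the real one is 16 random bytes, base64url-encoded:

{
    "connectionId": "dVhKzWFuYeF3Jqv1OMSkmw==",
    "availableTransports": [
        { "transport": "WebSockets", "transferFormats": ["Text", "Binary"] }
    ]
}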
//
// Websockets server
//
use std::sync::Arc;
use std::thread;

use ws::{self, util::Token, Factory, Handler, Handshake, Message, Sender, WebSocket};

use chashmap::CHashMap;
use chrono::NaiveDateTime;
use serde_json::from_str;

use crate::db::models::{Cipher, Folder, User};

use rmpv::Value;

fn serialize(val: Value) -> Vec<u8> {
    use rmpv::encode::write_value;

    let mut buf = Vec::new();
    write_value(&mut buf, &val).expect("Error encoding MsgPack");

    // Add size bytes at the start
    // Extracted from BinaryMessageFormat.js
    let mut size: usize = buf.len();
    let mut len_buf: Vec<u8> = Vec::new();

    loop {
        let mut size_part = size & 0x7f;
        size >>= 7;

        if size > 0 {
            size_part |= 0x80;
        }

        len_buf.push(size_part as u8);

        if size == 0 {
            break;
        }
    }

    len_buf.append(&mut buf);
    len_buf
}
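The loop above frames the payload with its length as a little-endian base-128 varint, the same scheme as SignalR's BinaryMessageFormat.js: 7 bits of the length per byte, continuation bit 0x80 on every byte except the last. A standalone sketch with a worked value:

// Sketch of the 7-bit length prefix used in serialize() above.
fn length_prefix(mut size: usize) -> Vec<u8> {
    let mut out = Vec::new();
    loop {
        let mut part = size & 0x7f;
        size >>= 7;
        if size > 0 {
            part |= 0x80; // more bytes follow
        }
        out.push(part as u8);
        if size == 0 {
            break;
        }
    }
    out
}

fn main() {
    assert_eq!(length_prefix(5), vec![0x05]);         // fits in one byte
    assert_eq!(length_prefix(300), vec![0xac, 0x02]); // 300 = 0b10_0101100
}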
fn serialize_date(date: NaiveDateTime) -> Value {
    let seconds: i64 = date.timestamp();
    let nanos: i64 = date.timestamp_subsec_nanos().into();
    let timestamp = nanos << 34 | seconds;

    let bs = timestamp.to_be_bytes();

    // -1 is Timestamp
    // https://github.com/msgpack/msgpack/blob/master/spec.md#timestamp-extension-type
    Value::Ext(-1, bs.to_vec())
}
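serialize_date packs the date into msgpack's "timestamp 64" layout, an Ext(-1) value with nanoseconds in the upper 30 bits and seconds in the lower 34 bits. A standalone restatement of just the bit packing:

use chrono::NaiveDateTime;

// msgpack timestamp 64: valid while the Unix seconds fit in 34 bits
// (i.e. until roughly the year 2514).
fn pack_timestamp64(date: NaiveDateTime) -> [u8; 8] {
    let seconds = date.timestamp();
    let nanos = i64::from(date.timestamp_subsec_nanos());
    ((nanos << 34) | seconds).to_be_bytes()
}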
fn convert_option<T: Into<Value>>(option: Option<T>) -> Value {
    match option {
        Some(a) => a.into(),
        None => Value::Nil,
    }
}

// Server WebSocket handler
pub struct WSHandler {
    out: Sender,
    user_uuid: Option<String>,
    users: WebSocketUsers,
}

const RECORD_SEPARATOR: u8 = 0x1e;
const INITIAL_RESPONSE: [u8; 3] = [0x7b, 0x7d, RECORD_SEPARATOR]; // {, }, <RS>

#[derive(Deserialize)]
struct InitialMessage {
    protocol: String,
    version: i32,
}

const PING_MS: u64 = 15_000;
const PING: Token = Token(1);

impl Handler for WSHandler {
    fn on_open(&mut self, hs: Handshake) -> ws::Result<()> {
        // TODO: Improve this split
        let path = hs.request.resource();
        let mut query_split: Vec<_> = path.split('?').nth(1).unwrap().split('&').collect();
        query_split.sort();
        let access_token = &query_split[0][13..];
        let _id = &query_split[1][3..];

        // Validate the user
        use crate::auth;
        let claims = match auth::decode_login(access_token) {
            Ok(claims) => claims,
            Err(_) => return Err(ws::Error::new(ws::ErrorKind::Internal, "Invalid access token provided")),
        };

        // Assign the user to the handler
        let user_uuid = claims.sub;
        self.user_uuid = Some(user_uuid.clone());

        // Add the current Sender to the user list
        let handler_insert = self.out.clone();
        let handler_update = self.out.clone();

        self.users
            .map
            .upsert(user_uuid, || vec![handler_insert], |ref mut v| v.push(handler_update));

        // Schedule a ping to keep the connection alive
        self.out.timeout(PING_MS, PING)
    }

    fn on_message(&mut self, msg: Message) -> ws::Result<()> {
        info!("Server got message '{}'. ", msg);

        if let Message::Text(text) = msg.clone() {
            let json = &text[..text.len() - 1]; // Remove last char

            if let Ok(InitialMessage { protocol, version }) = from_str::<InitialMessage>(json) {
                if &protocol == "messagepack" && version == 1 {
                    return self.out.send(&INITIAL_RESPONSE[..]); // Respond to initial message
                }
            }
        }

        // If it's not the initial message, just echo the message
        self.out.send(msg)
    }

    fn on_timeout(&mut self, event: Token) -> ws::Result<()> {
        if event == PING {
            // send ping
            self.out.send(create_ping())?;

            // reschedule the timeout
            self.out.timeout(PING_MS, PING)
        } else {
            Err(ws::Error::new(
                ws::ErrorKind::Internal,
                "Invalid timeout token provided",
            ))
        }
    }
}

struct WSFactory {
    pub users: WebSocketUsers,
}

impl WSFactory {
    pub fn init() -> Self {
        WSFactory {
            users: WebSocketUsers {
                map: Arc::new(CHashMap::new()),
            },
        }
    }
}

impl Factory for WSFactory {
    type Handler = WSHandler;

    fn connection_made(&mut self, out: Sender) -> Self::Handler {
        WSHandler {
            out,
            user_uuid: None,
            users: self.users.clone(),
        }
    }

    fn connection_lost(&mut self, handler: Self::Handler) {
        // Remove handler
        if let Some(user_uuid) = &handler.user_uuid {
            if let Some(mut user_conn) = self.users.map.get_mut(user_uuid) {
                user_conn.remove_item(&handler.out);
            }
        }
    }
}

#[derive(Clone)]
pub struct WebSocketUsers {
    map: Arc<CHashMap<String, Vec<Sender>>>,
}

impl WebSocketUsers {
    fn send_update(&self, user_uuid: &str, data: &[u8]) -> ws::Result<()> {
        if let Some(user) = self.map.get(user_uuid) {
            for sender in user.iter() {
                sender.send(data)?;
            }
        }
        Ok(())
    }

    // NOTE: The last modified date needs to be updated before calling these methods
    pub fn send_user_update(&self, ut: UpdateType, user: &User) {
        let data = create_update(
            vec![
                ("UserId".into(), user.uuid.clone().into()),
                ("Date".into(), serialize_date(user.updated_at)),
            ],
            ut,
        );

        self.send_update(&user.uuid, &data).ok();
    }

    pub fn send_folder_update(&self, ut: UpdateType, folder: &Folder) {
        let data = create_update(
            vec![
                ("Id".into(), folder.uuid.clone().into()),
                ("UserId".into(), folder.user_uuid.clone().into()),
                ("RevisionDate".into(), serialize_date(folder.updated_at)),
            ],
            ut,
        );

        self.send_update(&folder.user_uuid, &data).ok();
    }

    pub fn send_cipher_update(&self, ut: UpdateType, cipher: &Cipher, user_uuids: &[String]) {
        let user_uuid = convert_option(cipher.user_uuid.clone());
        let org_uuid = convert_option(cipher.organization_uuid.clone());

        let data = create_update(
            vec![
                ("Id".into(), cipher.uuid.clone().into()),
                ("UserId".into(), user_uuid),
                ("OrganizationId".into(), org_uuid),
                ("CollectionIds".into(), Value::Nil),
                ("RevisionDate".into(), serialize_date(cipher.updated_at)),
            ],
            ut,
        );

        for uuid in user_uuids {
            self.send_update(&uuid, &data).ok();
        }
    }
}

/* Message Structure
[
    1, // MessageType.Invocation
    {}, // Headers
    null, // InvocationId
    "ReceiveMessage", // Target
    [ // Arguments
        {
            "ContextId": "app_id",
            "Type": ut as i32,
            "Payload": {}
        }
    ]
]
*/
fn create_update(payload: Vec<(Value, Value)>, ut: UpdateType) -> Vec<u8> {
    use rmpv::Value as V;

    let value = V::Array(vec![
        1.into(),
        V::Array(vec![]),
        V::Nil,
        "ReceiveMessage".into(),
        V::Array(vec![V::Map(vec![
            ("ContextId".into(), "app_id".into()),
            ("Type".into(), (ut as i32).into()),
            ("Payload".into(), payload.into()),
        ])]),
    ]);

    serialize(value)
}

fn create_ping() -> Vec<u8> {
    serialize(Value::Array(vec![6.into()]))
}

#[allow(dead_code)]
#[derive(PartialEq)]
pub enum UpdateType {
    CipherUpdate = 0,
    CipherCreate = 1,
    LoginDelete = 2,
    FolderDelete = 3,
    Ciphers = 4,

    Vault = 5,
    OrgKeys = 6,
    FolderCreate = 7,
    FolderUpdate = 8,
    CipherDelete = 9,
    SyncSettings = 10,

    LogOut = 11,

    None = 100,
}

use rocket::State;
pub type Notify<'a> = State<'a, WebSocketUsers>;

pub fn start_notification_server() -> WebSocketUsers {
    let factory = WSFactory::init();
    let users = factory.users.clone();

    if CONFIG.websocket_enabled() {
        thread::spawn(move || {
            WebSocket::new(factory)
                .unwrap()
                .listen((CONFIG.websocket_address().as_str(), CONFIG.websocket_port()))
                .unwrap();
        });
    }

    users
}
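start_notification_server spawns the ws listener on its own thread and hands back the shared WebSocketUsers map, presumably so main.rs can store it as Rocket managed state, which is how handlers reach it through the Notify<'_> guard. A hedged sketch of that wiring; the actual main.rs changes are not part of the hunks shown here:

// Hypothetical wiring, for illustration only:
fn launch() {
    // WebSocketUsers is cheap to clone (Arc inside), so the same map is
    // shared between the ws thread and Rocket's managed state.
    let ws_users = start_notification_server();

    rocket::ignite()
        .manage(ws_users) // retrieved later via the Notify<'_> request guard
        .mount("/notifications", routes())
        .launch();
}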
src/api/web.rs
@@ -1,75 +1,75 @@
 use std::io;
 use std::path::{Path, PathBuf};

-use rocket::request::Request;
-use rocket::response::{self, NamedFile, Responder};
-use rocket::response::content::Content;
 use rocket::http::ContentType;
+use rocket::response::content::Content;
+use rocket::response::NamedFile;
 use rocket::Route;
-use rocket_contrib::{Json, Value};
+use rocket_contrib::json::Json;
+use serde_json::Value;

-use CONFIG;
+use crate::util::Cached;
+use crate::error::Error;
+use crate::CONFIG;

 pub fn routes() -> Vec<Route> {
-    if CONFIG.web_vault_enabled {
-        routes![web_index, app_id, web_files, attachments, alive]
+    if CONFIG.web_vault_enabled() {
+        routes![web_index, app_id, web_files, attachments, alive, images]
     } else {
         routes![attachments, alive]
     }
 }

-// TODO: Might want to use in memory cache: https://github.com/hgzimmerman/rocket-file-cache
 #[get("/")]
-fn web_index() -> WebHeaders<io::Result<NamedFile>> {
-    web_files("index.html".into())
+fn web_index() -> Cached<io::Result<NamedFile>> {
+    Cached::short(NamedFile::open(
+        Path::new(&CONFIG.web_vault_folder()).join("index.html"),
+    ))
 }

 #[get("/app-id.json")]
-fn app_id() -> WebHeaders<Content<Json<Value>>> {
+fn app_id() -> Cached<Content<Json<Value>>> {
     let content_type = ContentType::new("application", "fido.trusted-apps+json");

-    WebHeaders(Content(content_type, Json(json!({
-    "trustedFacets": [
-        {
-        "version": { "major": 1, "minor": 0 },
-        "ids": [
-            &CONFIG.domain,
-            "ios:bundle-id:com.8bit.bitwarden",
-            "android:apk-key-hash:dUGFzUzf3lmHSLBDBIv+WaFyZMI" ]
-        }]
-    }))))
+    Cached::long(Content(
+        content_type,
+        Json(json!({
+        "trustedFacets": [
+            {
+            "version": { "major": 1, "minor": 0 },
+            "ids": [
+                &CONFIG.domain(),
+                "ios:bundle-id:com.8bit.bitwarden",
+                "android:apk-key-hash:dUGFzUzf3lmHSLBDBIv+WaFyZMI" ]
+            }]
+        })),
+    ))
 }

-#[get("/<p..>", rank = 1)] // Only match this if the other routes don't match
-fn web_files(p: PathBuf) -> WebHeaders<io::Result<NamedFile>> {
-    WebHeaders(NamedFile::open(Path::new(&CONFIG.web_vault_folder).join(p)))
-}
-
-struct WebHeaders<R>(R);
-
-impl<'r, R: Responder<'r>> Responder<'r> for WebHeaders<R> {
-    fn respond_to(self, req: &Request) -> response::Result<'r> {
-        let mut res = self.0.respond_to(req)?;
-
-        res.set_raw_header("Referrer-Policy", "same-origin");
-        res.set_raw_header("X-Frame-Options", "SAMEORIGIN");
-        res.set_raw_header("X-Content-Type-Options", "nosniff");
-        res.set_raw_header("X-XSS-Protection", "1; mode=block");
-
-        Ok(res)
-    }
+#[get("/<p..>", rank = 10)] // Only match this if the other routes don't match
+fn web_files(p: PathBuf) -> Cached<io::Result<NamedFile>> {
+    Cached::long(NamedFile::open(Path::new(&CONFIG.web_vault_folder()).join(p)))
 }

 #[get("/attachments/<uuid>/<file..>")]
 fn attachments(uuid: String, file: PathBuf) -> io::Result<NamedFile> {
-    NamedFile::open(Path::new(&CONFIG.attachments_folder).join(uuid).join(file))
+    NamedFile::open(Path::new(&CONFIG.attachments_folder()).join(uuid).join(file))
 }

 #[get("/alive")]
 fn alive() -> Json<String> {
-    use util::format_date;
+    use crate::util::format_date;
     use chrono::Utc;

     Json(format_date(&Utc::now().naive_utc()))
 }
+
+#[get("/bwrs_images/<filename>")]
+fn images(filename: String) -> Result<Content<&'static [u8]>, Error> {
+    match filename.as_ref() {
+        "mail-github.png" => Ok(Content(ContentType::PNG, include_bytes!("../static/images/mail-github.png"))),
+        "logo-gray.png" => Ok(Content(ContentType::PNG, include_bytes!("../static/images/logo-gray.png"))),
+        "error-x.svg" => Ok(Content(ContentType::SVG, include_bytes!("../static/images/error-x.svg"))),
+        _ => err!("Image not found"),
+    }
+}
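Cached comes from crate::util, which is outside this diff. Judging by the Cached::short / Cached::long call sites and the Responder-wrapper pattern of the WebHeaders type this diff removes, a plausible minimal shape is the sketch below; the max-age values are invented:

use rocket::http::Header;
use rocket::request::Request;
use rocket::response::{self, Responder};

pub struct Cached<R>(R, &'static str);

impl<R> Cached<R> {
    pub fn short(r: R) -> Cached<R> { Cached(r, "public, max-age=600") }   // invented TTL
    pub fn long(r: R) -> Cached<R> { Cached(r, "public, max-age=604800") } // invented TTL
}

impl<'r, R: Responder<'r>> Responder<'r> for Cached<R> {
    fn respond_to(self, req: &Request) -> response::Result<'r> {
        // Delegate to the wrapped responder, then tack on a caching header.
        let mut res = self.0.respond_to(req)?;
        res.set_header(Header::new("Cache-Control", self.1));
        Ok(res)
    }
}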
src/auth.rs (256 lines)
@@ -1,64 +1,73 @@
-///
-/// JWT Handling
-///
-
-use util::read_file;
-use chrono::Duration;
-
-use jwt;
-use serde::ser::Serialize;
-
-use CONFIG;
+//
+// JWT Handling
+//
+use crate::util::read_file;
+use chrono::{Duration, Utc};
+
+use jsonwebtoken::{self, Algorithm, Header};
+use serde::de::DeserializeOwned;
+use serde::ser::Serialize;
+
+use crate::error::{Error, MapResult};
+use crate::CONFIG;

-const JWT_ALGORITHM: jwt::Algorithm = jwt::Algorithm::RS256;
+const JWT_ALGORITHM: Algorithm = Algorithm::RS256;

 lazy_static! {
     pub static ref DEFAULT_VALIDITY: Duration = Duration::hours(2);
-    pub static ref JWT_ISSUER: String = CONFIG.domain.clone();
-
-    static ref JWT_HEADER: jwt::Header = jwt::Header::new(JWT_ALGORITHM);
+    static ref JWT_HEADER: Header = Header::new(JWT_ALGORITHM);
+    pub static ref JWT_LOGIN_ISSUER: String = format!("{}|login", CONFIG.domain());
+    pub static ref JWT_INVITE_ISSUER: String = format!("{}|invite", CONFIG.domain());
+    pub static ref JWT_ADMIN_ISSUER: String = format!("{}|admin", CONFIG.domain());

-    static ref PRIVATE_RSA_KEY: Vec<u8> = match read_file(&CONFIG.private_rsa_key) {
+    static ref PRIVATE_RSA_KEY: Vec<u8> = match read_file(&CONFIG.private_rsa_key()) {
         Ok(key) => key,
-        Err(e) => panic!("Error loading private RSA Key from {}\n Error: {}", CONFIG.private_rsa_key, e)
+        Err(e) => panic!("Error loading private RSA Key.\n Error: {}", e),
     };

-    static ref PUBLIC_RSA_KEY: Vec<u8> = match read_file(&CONFIG.public_rsa_key) {
+    static ref PUBLIC_RSA_KEY: Vec<u8> = match read_file(&CONFIG.public_rsa_key()) {
         Ok(key) => key,
-        Err(e) => panic!("Error loading public RSA Key from {}\n Error: {}", CONFIG.public_rsa_key, e)
+        Err(e) => panic!("Error loading public RSA Key.\n Error: {}", e),
     };
 }

 pub fn encode_jwt<T: Serialize>(claims: &T) -> String {
-    match jwt::encode(&JWT_HEADER, claims, &PRIVATE_RSA_KEY) {
+    match jsonwebtoken::encode(&JWT_HEADER, claims, &PRIVATE_RSA_KEY) {
         Ok(token) => token,
-        Err(e) => panic!("Error encoding jwt {}", e)
+        Err(e) => panic!("Error encoding jwt {}", e),
     }
 }

-pub fn decode_jwt(token: &str) -> Result<JWTClaims, String> {
-    let validation = jwt::Validation {
+fn decode_jwt<T: DeserializeOwned>(token: &str, issuer: String) -> Result<T, Error> {
+    let validation = jsonwebtoken::Validation {
         leeway: 30, // 30 seconds
         validate_exp: true,
-        validate_iat: true,
         validate_nbf: true,
         aud: None,
-        iss: Some(JWT_ISSUER.clone()),
+        iss: Some(issuer),
         sub: None,
         algorithms: vec![JWT_ALGORITHM],
     };

-    match jwt::decode(token, &PUBLIC_RSA_KEY, &validation) {
-        Ok(decoded) => Ok(decoded.claims),
-        Err(msg) => {
-            println!("Error validating jwt - {:#?}", msg);
-            Err(msg.to_string())
-        }
-    }
+    let token = token.replace(char::is_whitespace, "");
+
+    jsonwebtoken::decode(&token, &PUBLIC_RSA_KEY, &validation)
+        .map(|d| d.claims)
+        .map_res("Error decoding JWT")
+}
+
+pub fn decode_login(token: &str) -> Result<LoginJWTClaims, Error> {
+    decode_jwt(token, JWT_LOGIN_ISSUER.to_string())
+}
+
+pub fn decode_invite(token: &str) -> Result<InviteJWTClaims, Error> {
+    decode_jwt(token, JWT_INVITE_ISSUER.to_string())
+}
+
+pub fn decode_admin(token: &str) -> Result<AdminJWTClaims, Error> {
+    decode_jwt(token, JWT_ADMIN_ISSUER.to_string())
 }

 #[derive(Debug, Serialize, Deserialize)]
-pub struct JWTClaims {
+pub struct LoginJWTClaims {
     // Not before
     pub nbf: i64,
     // Expiration time
@@ -76,6 +85,7 @@ pub struct JWTClaims {
     pub orgowner: Vec<String>,
     pub orgadmin: Vec<String>,
     pub orguser: Vec<String>,
+    pub orgmanager: Vec<String>,

     // user security_stamp
     pub sstamp: String,
@@ -87,15 +97,73 @@ pub struct JWTClaims {
     pub amr: Vec<String>,
 }

-///
-/// Bearer token authentication
-///
+#[derive(Debug, Serialize, Deserialize)]
+pub struct InviteJWTClaims {
+    // Not before
+    pub nbf: i64,
+    // Expiration time
+    pub exp: i64,
+    // Issuer
+    pub iss: String,
+    // Subject
+    pub sub: String,
+
+    pub email: String,
+    pub org_id: Option<String>,
+    pub user_org_id: Option<String>,
+    pub invited_by_email: Option<String>,
+}
+
+pub fn generate_invite_claims(
+    uuid: String,
+    email: String,
+    org_id: Option<String>,
+    org_user_id: Option<String>,
+    invited_by_email: Option<String>,
+) -> InviteJWTClaims {
+    let time_now = Utc::now().naive_utc();
+    InviteJWTClaims {
+        nbf: time_now.timestamp(),
+        exp: (time_now + Duration::days(5)).timestamp(),
+        iss: JWT_INVITE_ISSUER.to_string(),
+        sub: uuid.clone(),
+        email: email.clone(),
+        org_id: org_id.clone(),
+        user_org_id: org_user_id.clone(),
+        invited_by_email: invited_by_email.clone(),
+    }
+}
+
+#[derive(Debug, Serialize, Deserialize)]
+pub struct AdminJWTClaims {
+    // Not before
+    pub nbf: i64,
+    // Expiration time
+    pub exp: i64,
+    // Issuer
+    pub iss: String,
+    // Subject
+    pub sub: String,
+}
+
+pub fn generate_admin_claims() -> AdminJWTClaims {
+    let time_now = Utc::now().naive_utc();
+    AdminJWTClaims {
+        nbf: time_now.timestamp(),
+        exp: (time_now + Duration::minutes(20)).timestamp(),
+        iss: JWT_ADMIN_ISSUER.to_string(),
+        sub: "admin_panel".to_string(),
+    }
+}
+
+//
+// Bearer token authentication
+//
+use rocket::request::{self, FromRequest, Request};
 use rocket::Outcome;
-use rocket::request::{self, Request, FromRequest};

-use db::DbConn;
-use db::models::{User, UserOrganization, UserOrgType, Device};
+use crate::db::models::{Device, User, UserOrgStatus, UserOrgType, UserOrganization};
+use crate::db::DbConn;

 pub struct Headers {
     pub host: String,
@@ -110,8 +178,8 @@ impl<'a, 'r> FromRequest<'a, 'r> for Headers {
         let headers = request.headers();

         // Get host
-        let host = if CONFIG.domain_set {
-            CONFIG.domain.clone()
+        let host = if CONFIG.domain_set() {
+            CONFIG.domain()
         } else if let Some(referer) = headers.get_one("Referer") {
             referer.to_string()
         } else {
@@ -138,20 +206,18 @@ impl<'a, 'r> FromRequest<'a, 'r> for Headers {
         };

         // Get access_token
-        let access_token: &str = match request.headers().get_one("Authorization") {
-            Some(a) => {
-                match a.rsplit("Bearer ").next() {
-                    Some(split) => split,
-                    None => err_handler!("No access token provided")
-                }
-            }
-            None => err_handler!("No access token provided")
+        let access_token: &str = match headers.get_one("Authorization") {
+            Some(a) => match a.rsplit("Bearer ").next() {
+                Some(split) => split,
+                None => err_handler!("No access token provided"),
+            },
+            None => err_handler!("No access token provided"),
         };

         // Check JWT token is valid and get device and user from it
-        let claims: JWTClaims = match decode_jwt(access_token) {
+        let claims = match decode_login(access_token) {
             Ok(claims) => claims,
-            Err(_) => err_handler!("Invalid claim")
+            Err(_) => err_handler!("Invalid claim"),
         };

         let device_uuid = claims.device;
@@ -159,17 +225,17 @@ impl<'a, 'r> FromRequest<'a, 'r> for Headers {

         let conn = match request.guard::<DbConn>() {
             Outcome::Success(conn) => conn,
-            _ => err_handler!("Error getting DB")
+            _ => err_handler!("Error getting DB"),
         };

         let device = match Device::find_by_uuid(&device_uuid, &conn) {
             Some(device) => device,
-            None => err_handler!("Invalid device id")
+            None => err_handler!("Invalid device id"),
         };

         let user = match User::find_by_uuid(&user_uuid, &conn) {
             Some(user) => user,
-            None => err_handler!("Device has no user associated")
+            None => err_handler!("Device has no user associated"),
         };

         if user.security_stamp != claims.sstamp {
@@ -184,7 +250,7 @@ pub struct OrgHeaders {
     pub host: String,
     pub device: Device,
     pub user: User,
-    pub org_user_type: i32,
+    pub org_user_type: UserOrgType,
 }

 impl<'a, 'r> FromRequest<'a, 'r> for OrgHeaders {
@@ -192,30 +258,44 @@ impl<'a, 'r> FromRequest<'a, 'r> for OrgHeaders {

     fn from_request(request: &'a Request<'r>) -> request::Outcome<Self, Self::Error> {
         match request.guard::<Headers>() {
-            Outcome::Forward(f) => Outcome::Forward(f),
+            Outcome::Forward(_) => Outcome::Forward(()),
             Outcome::Failure(f) => Outcome::Failure(f),
             Outcome::Success(headers) => {
-                // org_id is expected to be the first dynamic param
-                match request.get_param::<String>(0) {
-                    Err(_) => err_handler!("Error getting the organization id"),
-                    Ok(org_id) => {
+                // org_id is expected to be the second param ("/organizations/<org_id>")
+                match request.get_param::<String>(1) {
+                    Some(Ok(org_id)) => {
                         let conn = match request.guard::<DbConn>() {
                             Outcome::Success(conn) => conn,
-                            _ => err_handler!("Error getting DB")
+                            _ => err_handler!("Error getting DB"),
                         };

-                        let org_user = match UserOrganization::find_by_user_and_org(&headers.user.uuid, &org_id, &conn) {
-                            Some(user) => user,
-                            None => err_handler!("The current user isn't member of the organization")
+                        let user = headers.user;
+                        let org_user = match UserOrganization::find_by_user_and_org(&user.uuid, &org_id, &conn) {
+                            Some(user) => {
+                                if user.status == UserOrgStatus::Confirmed as i32 {
+                                    user
+                                } else {
+                                    err_handler!("The current user isn't confirmed member of the organization")
+                                }
+                            }
+                            None => err_handler!("The current user isn't member of the organization"),
                         };

-                        Outcome::Success(Self{
+                        Outcome::Success(Self {
                             host: headers.host,
                             device: headers.device,
-                            user: headers.user,
-                            org_user_type: org_user.type_,
+                            user,
+                            org_user_type: {
+                                if let Some(org_usr_type) = UserOrgType::from_i32(org_user.atype) {
+                                    org_usr_type
+                                } else {
+                                    // This should only happen if the DB is corrupted
+                                    err_handler!("Unknown user type in the database")
+                                }
+                            },
                         })
                     }
+                    _ => err_handler!("Error getting the organization id"),
                 }
             }
         }
     }
 }
@@ -226,7 +306,7 @@ pub struct AdminHeaders {
     pub host: String,
     pub device: Device,
     pub user: User,
-    pub org_user_type: i32,
+    pub org_user_type: UserOrgType,
 }

 impl<'a, 'r> FromRequest<'a, 'r> for AdminHeaders {
@@ -234,18 +314,18 @@ impl<'a, 'r> FromRequest<'a, 'r> for AdminHeaders {

     fn from_request(request: &'a Request<'r>) -> request::Outcome<Self, Self::Error> {
         match request.guard::<OrgHeaders>() {
-            Outcome::Forward(f) => Outcome::Forward(f),
+            Outcome::Forward(_) => Outcome::Forward(()),
             Outcome::Failure(f) => Outcome::Failure(f),
             Outcome::Success(headers) => {
-                if headers.org_user_type > UserOrgType::Admin as i32 {
-                    err_handler!("You need to be Admin or Owner to call this endpoint")
-                } else {
-                    Outcome::Success(Self{
+                if headers.org_user_type >= UserOrgType::Admin {
+                    Outcome::Success(Self {
                         host: headers.host,
                         device: headers.device,
                         user: headers.user,
                         org_user_type: headers.org_user_type,
                     })
+                } else {
+                    err_handler!("You need to be Admin or Owner to call this endpoint")
                 }
             }
         }
     }
 }
@@ -263,19 +343,41 @@ impl<'a, 'r> FromRequest<'a, 'r> for OwnerHeaders {

     fn from_request(request: &'a Request<'r>) -> request::Outcome<Self, Self::Error> {
         match request.guard::<OrgHeaders>() {
-            Outcome::Forward(f) => Outcome::Forward(f),
+            Outcome::Forward(_) => Outcome::Forward(()),
             Outcome::Failure(f) => Outcome::Failure(f),
             Outcome::Success(headers) => {
-                if headers.org_user_type > UserOrgType::Owner as i32 {
-                    err_handler!("You need to be Owner to call this endpoint")
-                } else {
-                    Outcome::Success(Self{
+                if headers.org_user_type == UserOrgType::Owner {
+                    Outcome::Success(Self {
                         host: headers.host,
                         device: headers.device,
                         user: headers.user,
                     })
+                } else {
+                    err_handler!("You need to be Owner to call this endpoint")
                 }
             }
         }
     }
 }
+
+//
+// Client IP address detection
+//
+use std::net::IpAddr;
+
+pub struct ClientIp {
+    pub ip: IpAddr,
+}
+
+impl<'a, 'r> FromRequest<'a, 'r> for ClientIp {
+    type Error = ();
+
+    fn from_request(request: &'a Request<'r>) -> request::Outcome<Self, Self::Error> {
+        let ip = match request.client_ip() {
+            Some(addr) => addr,
+            None => "0.0.0.0".parse().unwrap(),
+        };
+
+        Outcome::Success(ClientIp { ip })
+    }
+}
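Why the issuer is baked into each token kind: a token minted for one flow cannot pass validation in another, because decode_jwt checks `iss` against a different "<domain>|<purpose>" string per helper. A hedged sketch using only the functions from the diff above; it assumes the instance's RSA key pair is in place:

// Illustration only, assumes the surrounding auth module:
fn demo() {
    let claims = generate_admin_claims(); // iss = "<domain>|admin", exp = now + 20 min
    let token = encode_jwt(&claims);

    assert!(decode_admin(&token).is_ok());  // issuer matches
    assert!(decode_invite(&token).is_err()); // "<domain>|invite" != "<domain>|admin"
}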
src/config.rs (new file, 605 lines)
@@ -0,0 +1,605 @@
use std::process::exit;
use std::sync::RwLock;

use crate::error::Error;
use crate::util::get_env;

lazy_static! {
    pub static ref CONFIG: Config = Config::load().unwrap_or_else(|e| {
        println!("Error loading config:\n\t{:?}\n", e);
        exit(12)
    });
    pub static ref CONFIG_FILE: String = {
        let data_folder = get_env("DATA_FOLDER").unwrap_or_else(|| String::from("data"));
        get_env("CONFIG_FILE").unwrap_or_else(|| format!("{}/config.json", data_folder))
    };
}

pub type Pass = String;

macro_rules! make_config {
    ($(
        $(#[doc = $groupdoc:literal])?
        $group:ident $(: $group_enabled:ident)? {
        $(
            $(#[doc = $doc:literal])+
            $name:ident : $ty:ty, $editable:literal, $none_action:ident $(, $default:expr)?;
        )+},
    )+) => {
        pub struct Config { inner: RwLock<Inner> }

        struct Inner {
            templates: Handlebars,
            config: ConfigItems,

            _env: ConfigBuilder,
            _usr: ConfigBuilder,
        }

        #[derive(Debug, Clone, Default, Deserialize, Serialize)]
        pub struct ConfigBuilder {
            $($(
                #[serde(skip_serializing_if = "Option::is_none")]
                $name: Option<$ty>,
            )+)+
        }

        impl ConfigBuilder {
            fn from_env() -> Self {
                dotenv::from_path(".env").ok();

                let mut builder = ConfigBuilder::default();
                $($(
                    builder.$name = get_env(&stringify!($name).to_uppercase());
                )+)+

                builder
            }

            fn from_file(path: &str) -> Result<Self, Error> {
                use crate::util::read_file_string;
                let config_str = read_file_string(path)?;
                serde_json::from_str(&config_str).map_err(Into::into)
            }

            /// Merges the values of both builders into a new builder.
            /// If both have the same element, `other` wins.
            fn merge(&self, other: &Self, show_overrides: bool) -> Self {
                let mut overrides = Vec::new();
                let mut builder = self.clone();
                $($(
                    if let v @Some(_) = &other.$name {
                        builder.$name = v.clone();

                        if self.$name.is_some() {
                            overrides.push(stringify!($name).to_uppercase());
                        }
                    }
                )+)+

                if show_overrides && !overrides.is_empty() {
                    // We can't use warn! here because logging isn't setup yet.
                    println!("[WARNING] The following environment variables are being overriden by the config file,");
                    println!("[WARNING] please use the admin panel to make changes to them:");
                    println!("[WARNING] {}\n", overrides.join(", "));
                }

                builder
            }

            /// Returns a new builder with all the elements from self,
            /// except those that are equal in both sides
            fn _remove(&self, other: &Self) -> Self {
                let mut builder = ConfigBuilder::default();
                $($(
                    if &self.$name != &other.$name {
                        builder.$name = self.$name.clone();
                    }

                )+)+
                builder
            }

            fn build(&self) -> ConfigItems {
                let mut config = ConfigItems::default();
                let _domain_set = self.domain.is_some();
                $($(
                    config.$name = make_config!{ @build self.$name.clone(), &config, $none_action, $($default)? };
                )+)+
                config.domain_set = _domain_set;

                config
            }
        }

        #[derive(Debug, Clone, Default)]
        pub struct ConfigItems { $($(pub $name: make_config!{@type $ty, $none_action}, )+)+ }

        #[allow(unused)]
        impl Config {
            $($(
                pub fn $name(&self) -> make_config!{@type $ty, $none_action} {
                    self.inner.read().unwrap().config.$name.clone()
                }
            )+)+

            pub fn prepare_json(&self) -> serde_json::Value {
                let (def, cfg) = {
                    let inner = &self.inner.read().unwrap();
                    (inner._env.build(), inner.config.clone())
                };

                fn _get_form_type(rust_type: &str) -> &'static str {
                    match rust_type {
                        "Pass" => "password",
                        "String" => "text",
                        "bool" => "checkbox",
                        _ => "number"
                    }
                }

                fn _get_doc(doc: &str) -> serde_json::Value {
                    let mut split = doc.split("|>").map(str::trim);
                    json!({
                        "name": split.next(),
                        "description": split.next()
                    })
                }

                json!([ $({
                    "group": stringify!($group),
                    "grouptoggle": stringify!($($group_enabled)?),
                    "groupdoc": make_config!{ @show $($groupdoc)? },
                    "elements": [
                        $( {
                            "editable": $editable,
                            "name": stringify!($name),
                            "value": cfg.$name,
                            "default": def.$name,
                            "type": _get_form_type(stringify!($ty)),
                            "doc": _get_doc(concat!($($doc),+)),
                        }, )+
                    ]}, )+ ])
            }
        }
    };

    // Group or empty string
    ( @show ) => { "" };
    ( @show $lit:literal ) => { $lit };

    // Wrap the optionals in an Option type
    ( @type $ty:ty, option) => { Option<$ty> };
    ( @type $ty:ty, $id:ident) => { $ty };

    // Generate the values depending on none_action
    ( @build $value:expr, $config:expr, option, ) => { $value };
    ( @build $value:expr, $config:expr, def, $default:expr ) => { $value.unwrap_or($default) };
    ( @build $value:expr, $config:expr, auto, $default_fn:expr ) => {{
        match $value {
            Some(v) => v,
            None => {
                let f: &dyn Fn(&ConfigItems) -> _ = &$default_fn;
                f($config)
            }
        }
    }};
}

//STRUCTURE:
// /// Short description (without this they won't appear on the list)
// group {
//   /// Friendly Name |> Description (Optional)
//   name: type, is_editable, none_action, <default_value (Optional)>
// }
//
// Where none_action applied when the value wasn't provided and can be:
//  def:    Use a default value
//  auto:   Value is auto generated based on other values
//  option: Value is optional
make_config! {
    folders {
        /// Data folder |> Main data folder
        data_folder: String, false, def, "data".to_string();
        /// Database URL
        database_url: String, false, auto, |c| format!("{}/{}", c.data_folder, "db.sqlite3");
        /// Icon cache folder
        icon_cache_folder: String, false, auto, |c| format!("{}/{}", c.data_folder, "icon_cache");
        /// Attachments folder
        attachments_folder: String, false, auto, |c| format!("{}/{}", c.data_folder, "attachments");
        /// Templates folder
        templates_folder: String, false, auto, |c| format!("{}/{}", c.data_folder, "templates");
        /// Session JWT key
        rsa_key_filename: String, false, auto, |c| format!("{}/{}", c.data_folder, "rsa_key");
        /// Web vault folder
        web_vault_folder: String, false, def, "web-vault/".to_string();
    },
    ws {
        /// Enable websocket notifications
        websocket_enabled: bool, false, def, false;
        /// Websocket address
        websocket_address: String, false, def, "0.0.0.0".to_string();
        /// Websocket port
        websocket_port: u16, false, def, 3012;
    },

    /// General settings
    settings {
        /// Domain URL |> This needs to be set to the URL used to access the server, including 'http[s]://'
        /// and port, if it's different than the default. Some server functions don't work correctly without this value
        domain: String, true, def, "http://localhost".to_string();
        /// Domain Set |> Indicates if the domain is set by the admin. Otherwise the default will be used.
        domain_set: bool, false, def, false;
        /// Enable web vault
        web_vault_enabled: bool, false, def, true;

        /// HIBP Api Key |> HaveIBeenPwned API Key, request it here: https://haveibeenpwned.com/API/Key
        hibp_api_key: Pass, true, option;

        /// Disable icon downloads |> Set to true to disable icon downloading, this would still serve icons from
        /// $ICON_CACHE_FOLDER, but it won't produce any external network request. Needs to set $ICON_CACHE_TTL to 0,
        /// otherwise it will delete them and they won't be downloaded again.
        disable_icon_download: bool, true, def, false;
        /// Allow new signups |> Controls if new users can register. Note that while this is disabled, users could still be invited
        signups_allowed: bool, true, def, true;
        /// Allow invitations |> Controls whether users can be invited by organization admins, even when signups are disabled
        invitations_allowed: bool, true, def, true;
        /// Password iterations |> Number of server-side passwords hashing iterations.
        /// The changes only apply when a user changes their password. Not recommended to lower the value
        password_iterations: i32, true, def, 100_000;
        /// Show password hints |> Controls if the password hint should be shown directly in the web page.
        /// Otherwise, if email is disabled, there is no way to see the password hint
        show_password_hint: bool, true, def, true;

        /// Admin page token |> The token used to authenticate in this very same page. Changing it here won't deauthorize the current session
        admin_token: Pass, true, option;
    },

    /// Advanced settings
    advanced {
        /// Positive icon cache expiry |> Number of seconds to consider that an already cached icon is fresh. After this period, the icon will be redownloaded
        icon_cache_ttl: u64, true, def, 2_592_000;
        /// Negative icon cache expiry |> Number of seconds before trying to download an icon that failed again.
        icon_cache_negttl: u64, true, def, 259_200;
        /// Icon download timeout |> Number of seconds when to stop attempting to download an icon.
        icon_download_timeout: u64, true, def, 10;
        /// Icon blacklist Regex |> Any domains or IPs that match this regex won't be fetched by the icon service.
        /// Useful to hide other servers in the local network. Check the WIKI for more details
        icon_blacklist_regex: String, true, option;

        /// Disable Two-Factor remember |> Enabling this would force the users to use a second factor to login every time.
        /// Note that the checkbox would still be present, but ignored.
        disable_2fa_remember: bool, true, def, false;

        /// Require new device emails |> When a user logs in an email is required to be sent.
        /// If sending the email fails the login attempt will fail.
        require_device_email: bool, true, def, false;

        /// Reload templates (Dev) |> When this is set to true, the templates get reloaded with every request.
        /// ONLY use this during development, as it can slow down the server
        reload_templates: bool, true, def, false;

        /// Log routes at launch (Dev)
        log_mounts: bool, true, def, false;
        /// Enable extended logging
        extended_logging: bool, false, def, true;
        /// Enable the log to output to Syslog
        use_syslog: bool, false, def, false;
        /// Log file path
        log_file: String, false, option;
        /// Log level
        log_level: String, false, def, "Info".to_string();

        /// Enable DB WAL |> Turning this off might lead to worse performance, but might help if using bitwarden_rs on some exotic filesystems,
        /// that do not support WAL. Please make sure you read project wiki on the topic before changing this setting.
        enable_db_wal: bool, false, def, true;

        /// Disable Admin Token (Know the risks!) |> Disables the Admin Token for the admin page so you may use your own auth in-front
        disable_admin_token: bool, true, def, false;
    },

    /// Yubikey settings
    yubico: _enable_yubico {
        /// Enabled
        _enable_yubico: bool, true, def, true;
        /// Client ID
        yubico_client_id: String, true, option;
        /// Secret Key
        yubico_secret_key: Pass, true, option;
        /// Server
        yubico_server: String, true, option;
    },

    /// Global Duo settings (Note that users can override them)
    duo: _enable_duo {
        /// Enabled
        _enable_duo: bool, true, def, false;
        /// Integration Key
        duo_ikey: String, true, option;
        /// Secret Key
        duo_skey: Pass, true, option;
        /// Host
        duo_host: String, true, option;
        /// Application Key (generated automatically)
        _duo_akey: Pass, false, option;
    },

    /// SMTP Email Settings
    smtp: _enable_smtp {
        /// Enabled
        _enable_smtp: bool, true, def, true;
        /// Host
        smtp_host: String, true, option;
        /// Enable SSL
        smtp_ssl: bool, true, def, true;
        /// Use explicit TLS |> Enabling this would force the use of an explicit TLS connection, instead of upgrading an insecure one with STARTTLS
        smtp_explicit_tls: bool, true, def, false;
        /// Port
        smtp_port: u16, true, auto, |c| if c.smtp_explicit_tls {465} else if c.smtp_ssl {587} else {25};
        /// From Address
        smtp_from: String, true, def, String::new();
        /// From Name
        smtp_from_name: String, true, def, "Bitwarden_RS".to_string();
        /// Username
        smtp_username: String, true, option;
        /// Password
        smtp_password: Pass, true, option;
        /// Json form auth mechanism |> Defaults for ssl is "Plain" and "Login" and nothing for non-ssl connections. Possible values: ["Plain", "Login", "Xoauth2"]
        smtp_auth_mechanism: String, true, option;
    },
}
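The merge order here is worth spelling out: from_env reads UPPERCASED variable names (stringify! plus to_uppercase, so websocket_port becomes WEBSOCKET_PORT), and in Config::load the call _env.merge(&_usr, true) lets the admin-page config.json win over the environment, with the macro defaults filling whatever is still None. A standalone sketch of that precedence:

// Sketch of the precedence implemented by merge()/build():
// defaults < environment (.env) < user config file (config.json).
fn resolve(env: Option<u16>, usr: Option<u16>, default: u16) -> u16 {
    usr.or(env).unwrap_or(default) // `other` (the file) wins in merge()
}

fn main() {
    assert_eq!(resolve(None, None, 3012), 3012);              // nothing set -> default
    assert_eq!(resolve(Some(4000), None, 3012), 4000);        // .env only
    assert_eq!(resolve(Some(4000), Some(5000), 3012), 5000);  // file overrides env
}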
fn validate_config(cfg: &ConfigItems) -> Result<(), Error> {
    if let Some(ref token) = cfg.admin_token {
        if token.trim().is_empty() {
            err!("`ADMIN_TOKEN` is enabled but has an empty value. To enable the admin page without token, use `DISABLE_ADMIN_TOKEN`")
        }
    }

    if (cfg.duo_host.is_some() || cfg.duo_ikey.is_some() || cfg.duo_skey.is_some())
        && !(cfg.duo_host.is_some() && cfg.duo_ikey.is_some() && cfg.duo_skey.is_some())
    {
        err!("All Duo options need to be set for global Duo support")
    }

    if cfg.yubico_client_id.is_some() != cfg.yubico_secret_key.is_some() {
        err!("Both `YUBICO_CLIENT_ID` and `YUBICO_SECRET_KEY` need to be set for Yubikey OTP support")
    }

    if cfg.smtp_host.is_some() == cfg.smtp_from.is_empty() {
        err!("Both `SMTP_HOST` and `SMTP_FROM` need to be set for email support")
    }

    if cfg.smtp_username.is_some() != cfg.smtp_password.is_some() {
        err!("Both `SMTP_USERNAME` and `SMTP_PASSWORD` need to be set to enable email authentication")
    }

    Ok(())
}
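A worked example of the SMTP check above, which requires the two settings to be set (or unset) together:

// SMTP_HOST=smtp.example.com  -> cfg.smtp_host.is_some() == true
// SMTP_FROM unset             -> cfg.smtp_from.is_empty() == true
//
// true == true, so startup fails with
// "Both `SMTP_HOST` and `SMTP_FROM` need to be set for email support".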
||||||
|
impl Config {
|
||||||
|
pub fn load() -> Result<Self, Error> {
|
||||||
|
// Loading from env and file
|
||||||
|
let _env = ConfigBuilder::from_env();
|
||||||
|
let _usr = ConfigBuilder::from_file(&CONFIG_FILE).unwrap_or_default();
|
||||||
|
|
||||||
|
// Create merged config, config file overwrites env
|
||||||
|
let builder = _env.merge(&_usr, true);
|
||||||
|
|
||||||
|
// Fill any missing with defaults
|
||||||
|
let config = builder.build();
|
||||||
|
validate_config(&config)?;
|
||||||
|
|
||||||
|
Ok(Config {
|
||||||
|
inner: RwLock::new(Inner {
|
||||||
|
templates: load_templates(&config.templates_folder),
|
||||||
|
config,
|
||||||
|
_env,
|
||||||
|
_usr,
|
||||||
|
}),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn update_config(&self, other: ConfigBuilder) -> Result<(), Error> {
|
||||||
|
// Remove default values
|
||||||
|
//let builder = other.remove(&self.inner.read().unwrap()._env);
|
||||||
|
|
||||||
|
// TODO: Remove values that are defaults, above only checks those set by env and not the defaults
|
||||||
|
let builder = other;
|
||||||
|
|
||||||
|
// Serialize now before we consume the builder
|
||||||
|
let config_str = serde_json::to_string_pretty(&builder)?;
|
||||||
|
|
||||||
|
// Prepare the combined config
|
||||||
|
let config = {
|
||||||
|
let env = &self.inner.read().unwrap()._env;
|
||||||
|
env.merge(&builder, false).build()
|
||||||
|
};
|
||||||
|
validate_config(&config)?;
|
||||||
|
|
||||||
|
// Save both the user and the combined config
|
||||||
|
{
|
||||||
|
let mut writer = self.inner.write().unwrap();
|
||||||
|
writer.config = config;
|
||||||
|
writer._usr = builder;
|
||||||
|
}
|
||||||
|
|
||||||
|
//Save to file
|
||||||
|
use std::{fs::File, io::Write};
|
||||||
|
let mut file = File::create(&*CONFIG_FILE)?;
|
||||||
|
file.write_all(config_str.as_bytes())?;
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn update_config_partial(&self, other: ConfigBuilder) -> Result<(), Error> {
|
||||||
|
let builder = {
|
||||||
|
let usr = &self.inner.read().unwrap()._usr;
|
||||||
|
usr.merge(&other, false)
|
||||||
|
};
|
||||||
|
self.update_config(builder)
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn delete_user_config(&self) -> Result<(), Error> {
|
||||||
|
crate::util::delete_file(&CONFIG_FILE)?;
|
||||||
|
|
||||||
|
// Empty user config
|
||||||
|
let usr = ConfigBuilder::default();
|
||||||
|
|
||||||
|
// Config now is env + defaults
|
||||||
|
let config = {
|
||||||
|
let env = &self.inner.read().unwrap()._env;
|
||||||
|
env.build()
|
||||||
|
};
|
||||||
|
|
||||||
|
// Save configs
|
||||||
|
{
|
||||||
|
let mut writer = self.inner.write().unwrap();
|
||||||
|
writer.config = config;
|
||||||
|
writer._usr = usr;
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn private_rsa_key(&self) -> String {
|
||||||
|
format!("{}.der", CONFIG.rsa_key_filename())
|
||||||
|
}
|
||||||
|
pub fn private_rsa_key_pem(&self) -> String {
|
||||||
|
format!("{}.pem", CONFIG.rsa_key_filename())
|
||||||
|
}
|
||||||
|
pub fn public_rsa_key(&self) -> String {
|
||||||
|
format!("{}.pub.der", CONFIG.rsa_key_filename())
|
||||||
|
}
|
||||||
|
pub fn mail_enabled(&self) -> bool {
|
||||||
|
let inner = &self.inner.read().unwrap().config;
|
||||||
|
inner._enable_smtp && inner.smtp_host.is_some()
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn get_duo_akey(&self) -> String {
|
||||||
|
if let Some(akey) = self._duo_akey() {
|
||||||
|
akey
|
||||||
|
} else {
|
||||||
|
let akey = crate::crypto::get_random_64();
|
||||||
|
let akey_s = data_encoding::BASE64.encode(&akey);
|
||||||
|
|
||||||
|
// Save the new value
|
||||||
|
let mut builder = ConfigBuilder::default();
|
||||||
|
builder._duo_akey = Some(akey_s.clone());
|
||||||
|
self.update_config_partial(builder).ok();
|
||||||
|
|
||||||
|
akey_s
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn render_template<T: serde::ser::Serialize>(
|
||||||
|
&self,
|
||||||
|
name: &str,
|
||||||
|
data: &T,
|
||||||
|
) -> Result<String, crate::error::Error> {
|
||||||
|
if CONFIG.reload_templates() {
|
||||||
|
warn!("RELOADING TEMPLATES");
|
||||||
|
let hb = load_templates(CONFIG.templates_folder().as_ref());
|
||||||
|
hb.render(name, data).map_err(Into::into)
|
||||||
|
} else {
|
||||||
|
let hb = &CONFIG.inner.read().unwrap().templates;
|
||||||
|
hb.render(name, data).map_err(Into::into)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
use handlebars::{
|
||||||
|
Context, Handlebars, Helper, HelperDef, HelperResult, Output, RenderContext, RenderError, Renderable,
|
||||||
|
};
|
||||||
|
|
||||||
|
fn load_templates(path: &str) -> Handlebars {
|
||||||
|
let mut hb = Handlebars::new();
|
||||||
|
// Error on missing params
|
||||||
|
hb.set_strict_mode(true);
|
||||||
|
// Register helpers
|
||||||
|
hb.register_helper("case", Box::new(CaseHelper));
|
||||||
|
hb.register_helper("jsesc", Box::new(JsEscapeHelper));
|
||||||
|
|
||||||
|
macro_rules! reg {
|
||||||
|
($name:expr) => {{
|
||||||
|
let template = include_str!(concat!("static/templates/", $name, ".hbs"));
|
||||||
|
hb.register_template_string($name, template).unwrap();
|
||||||
|
}};
|
||||||
|
($name:expr, $ext:expr) => {{
|
||||||
|
reg!($name);
|
||||||
|
reg!(concat!($name, $ext));
|
||||||
|
}};
|
||||||
|
}
|
||||||
|
|
||||||
|
// First register default templates here
|
||||||
|
reg!("email/invite_accepted", ".html");
|
||||||
|
reg!("email/invite_confirmed", ".html");
|
||||||
|
reg!("email/new_device_logged_in", ".html");
|
||||||
|
reg!("email/pw_hint_none", ".html");
|
||||||
|
reg!("email/pw_hint_some", ".html");
|
||||||
|
reg!("email/send_org_invite", ".html");
|
||||||
|
|
||||||
|
reg!("admin/base");
|
||||||
|
reg!("admin/login");
|
||||||
|
reg!("admin/page");
|
||||||
|
|
||||||
|
// And then load user templates to overwrite the defaults
|
||||||
|
// Use .hbs extension for the files
|
||||||
|
// Templates get registered with their relative name
|
||||||
|
hb.register_templates_directory(".hbs", path).unwrap();
|
||||||
|
|
||||||
|
hb
|
||||||
|
}
|
pub struct CaseHelper;

impl HelperDef for CaseHelper {
    fn call<'reg: 'rc, 'rc>(
        &self,
        h: &Helper<'reg, 'rc>,
        r: &'reg Handlebars,
        ctx: &Context,
        rc: &mut RenderContext<'reg>,
        out: &mut dyn Output,
    ) -> HelperResult {
        let param = h
            .param(0)
            .ok_or_else(|| RenderError::new("Param not found for helper \"case\""))?;
        let value = param.value().clone();

        if h.params().iter().skip(1).any(|x| x.value() == &value) {
            h.template().map(|t| t.render(r, ctx, rc, out)).unwrap_or(Ok(()))
        } else {
            Ok(())
        }
    }
}
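`CaseHelper` renders its block only when the first parameter equals one of the remaining ones. A sketch of how it behaves once registered, assuming the imports at the top of this file; the template and data are invented for illustration:

    let mut hb = Handlebars::new();
    hb.register_helper("case", Box::new(CaseHelper));
    hb.register_template_string("t", "{{#case Type 1 3}}matched{{/case}}").unwrap();

    // Renders the block when Type is 1 or 3, and nothing otherwise.
    assert_eq!(hb.render("t", &serde_json::json!({"Type": 1})).unwrap(), "matched");
    assert_eq!(hb.render("t", &serde_json::json!({"Type": 2})).unwrap(), "");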
pub struct JsEscapeHelper;

impl HelperDef for JsEscapeHelper {
    fn call<'reg: 'rc, 'rc>(
        &self,
        h: &Helper<'reg, 'rc>,
        _: &'reg Handlebars,
        _: &Context,
        _: &mut RenderContext<'reg>,
        out: &mut dyn Output,
    ) -> HelperResult {
        let param = h
            .param(0)
            .ok_or_else(|| RenderError::new("Param not found for helper \"js_escape\""))?;

        let value = param
            .value()
            .as_str()
            .ok_or_else(|| RenderError::new("Param for helper \"js_escape\" is not a String"))?;

        let escaped_value = value.replace('\\', "").replace('\'', "\\x22").replace('\"', "\\x27");
        let quoted_value = format!("&quot;{}&quot;", escaped_value);

        out.write(&quoted_value)?;
        Ok(())
    }
}
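Tracing the replacements above on a sample (invented) input: backslashes are stripped, single quotes become `\x22`, double quotes become `\x27`, and the result is wrapped in `&quot;` entities. Note that `\x22` is the JS hex escape for `"` and `\x27` for `'`:

    // input:   it's "ok"
    // step 1:  it's "ok"            (no backslashes to strip)
    // step 2:  it\x22s "ok"         (' -> \x22)
    // step 3:  it\x22s \x27ok\x27   (" -> \x27)
    // output:  &quot;it\x22s \x27ok\x27&quot;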
@@ -1,8 +1,9 @@
-///
-/// PBKDF2 derivation
-///
+//
+// PBKDF2 derivation
+//
 
-use ring::{digest, pbkdf2};
+use ring::{digest, hmac, pbkdf2};
+use std::num::NonZeroU32;
 
 static DIGEST_ALG: &digest::Algorithm = &digest::SHA256;
 const OUTPUT_LEN: usize = digest::SHA256_OUTPUT_LEN;
@@ -10,18 +11,32 @@ const OUTPUT_LEN: usize = digest::SHA256_OUTPUT_LEN;
 pub fn hash_password(secret: &[u8], salt: &[u8], iterations: u32) -> Vec<u8> {
     let mut out = vec![0u8; OUTPUT_LEN]; // Initialize array with zeros
+
+    let iterations = NonZeroU32::new(iterations).expect("Iterations can't be zero");
     pbkdf2::derive(DIGEST_ALG, iterations, salt, secret, &mut out);
 
     out
 }
 
 pub fn verify_password_hash(secret: &[u8], salt: &[u8], previous: &[u8], iterations: u32) -> bool {
+    let iterations = NonZeroU32::new(iterations).expect("Iterations can't be zero");
     pbkdf2::verify(DIGEST_ALG, iterations, salt, secret, previous).is_ok()
 }
 
-///
-/// Random values
-///
+//
+// HMAC
+//
+pub fn hmac_sign(key: &str, data: &str) -> String {
+    use data_encoding::HEXLOWER;
+
+    let key = hmac::SigningKey::new(&digest::SHA1, key.as_bytes());
+    let signature = hmac::sign(&key, data.as_bytes());
+
+    HEXLOWER.encode(signature.as_ref())
+}
+
+//
+// Random values
+//
 
 pub fn get_random_64() -> Vec<u8> {
     get_random(vec![0u8; 64])
@@ -30,7 +45,18 @@ pub fn get_random_64() -> Vec<u8> {
 pub fn get_random(mut array: Vec<u8>) -> Vec<u8> {
     use ring::rand::{SecureRandom, SystemRandom};
 
-    SystemRandom::new().fill(&mut array).expect("Error generating random values");
+    SystemRandom::new()
+        .fill(&mut array)
+        .expect("Error generating random values");
 
     array
 }
+
+//
+// Constant time compare
+//
+pub fn ct_eq<T: AsRef<[u8]>, U: AsRef<[u8]>>(a: T, b: U) -> bool {
+    use ring::constant_time::verify_slices_are_equal;
+
+    verify_slices_are_equal(a.as_ref(), b.as_ref()).is_ok()
+}
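Round-trip sketch for the PBKDF2 pair above, assuming these functions are in scope; the password and iteration count are arbitrary examples:

    let salt = get_random_64();
    let hash = hash_password(b"master password", &salt, 100_000);

    assert!(verify_password_hash(b"master password", &salt, &hash, 100_000));
    assert!(!verify_password_hash(b"wrong guess", &salt, &hash, 100_000));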
@@ -1,39 +1,63 @@
 use std::ops::Deref;
 
-use diesel::{Connection as DieselConnection, ConnectionError};
-use diesel::sqlite::SqliteConnection;
 use diesel::r2d2;
 use diesel::r2d2::ConnectionManager;
+use diesel::{Connection as DieselConnection, ConnectionError};
 
 use rocket::http::Status;
 use rocket::request::{self, FromRequest};
 use rocket::{Outcome, Request, State};
 
-use CONFIG;
+use crate::error::Error;
+use chrono::prelude::*;
+use std::process::Command;
+
+use crate::CONFIG;
 
 /// An alias to the database connection used
-type Connection = SqliteConnection;
+#[cfg(feature = "sqlite")]
+type Connection = diesel::sqlite::SqliteConnection;
+#[cfg(feature = "mysql")]
+type Connection = diesel::mysql::MysqlConnection;
 
-/// An alias to the type for a pool of Diesel SQLite connections.
+/// An alias to the type for a pool of Diesel connections.
 type Pool = r2d2::Pool<ConnectionManager<Connection>>;
 
 /// Connection request guard type: a wrapper around an r2d2 pooled connection.
 pub struct DbConn(pub r2d2::PooledConnection<ConnectionManager<Connection>>);
 
-pub mod schema;
 pub mod models;
+#[cfg(feature = "sqlite")]
+#[path = "schemas/sqlite/schema.rs"]
+pub mod schema;
+#[cfg(feature = "mysql")]
+#[path = "schemas/mysql/schema.rs"]
+pub mod schema;
 
 /// Initializes a database pool.
 pub fn init_pool() -> Pool {
-    let manager = ConnectionManager::new(&*CONFIG.database_url);
+    let manager = ConnectionManager::new(CONFIG.database_url());
 
-    r2d2::Pool::builder()
-        .build(manager)
-        .expect("Failed to create pool")
+    r2d2::Pool::builder().build(manager).expect("Failed to create pool")
 }
 
 pub fn get_connection() -> Result<Connection, ConnectionError> {
-    Connection::establish(&CONFIG.database_url)
+    Connection::establish(&CONFIG.database_url())
 }
+
+/// Creates a back-up of the database using sqlite3
+pub fn backup_database() -> Result<(), Error> {
+    let now: DateTime<Utc> = Utc::now();
+    let file_date = now.format("%Y%m%d").to_string();
+    let backup_command: String = format!("{}{}{}", ".backup 'db_", file_date, ".sqlite3'");
+
+    Command::new("sqlite3")
+        .current_dir("./data")
+        .args(&["db.sqlite3", &backup_command])
+        .output()
+        .expect("Can't open database, sqlite3 is not available, make sure it's installed and available on the PATH");
+
+    Ok(())
+}
 
 /// Attempts to retrieve a single connection from the managed database pool. If
@@ -46,7 +70,7 @@ impl<'a, 'r> FromRequest<'a, 'r> for DbConn {
         let pool = request.guard::<State<Pool>>()?;
         match pool.get() {
             Ok(conn) => Outcome::Success(DbConn(conn)),
-            Err(_) => Outcome::Failure((Status::ServiceUnavailable, ()))
+            Err(_) => Outcome::Failure((Status::ServiceUnavailable, ())),
         }
     }
 }
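With the `FromRequest` impl above, any Rocket handler that declares a `DbConn` parameter receives a pooled connection, or a 503 when the pool cannot supply one. A hypothetical route, not part of this diff (route name and path are invented):

    #[get("/alive")]
    fn alive(_conn: DbConn) -> &'static str {
        // The guard has already acquired a connection from the pool.
        "ok"
    }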
@@ -1,7 +1,7 @@
-use serde_json::Value as JsonValue;
+use serde_json::Value;
 
 use super::Cipher;
-use CONFIG;
+use crate::CONFIG;
 
 #[derive(Debug, Identifiable, Queryable, Insertable, Associations)]
 #[table_name = "attachments"]
@@ -12,6 +12,7 @@ pub struct Attachment {
     pub cipher_uuid: String,
     pub file_name: String,
     pub file_size: i32,
+    pub akey: Option<String>,
 }
 
 /// Local methods
@@ -22,15 +23,16 @@ impl Attachment {
             cipher_uuid,
             file_name,
             file_size,
+            akey: None,
         }
     }
 
     pub fn get_file_path(&self) -> String {
-        format!("{}/{}/{}", CONFIG.attachments_folder, self.cipher_uuid, self.id)
+        format!("{}/{}/{}", CONFIG.attachments_folder(), self.cipher_uuid, self.id)
     }
 
-    pub fn to_json(&self, host: &str) -> JsonValue {
-        use util::get_display_size;
+    pub fn to_json(&self, host: &str) -> Value {
+        use crate::util::get_display_size;
 
         let web_path = format!("{}/attachments/{}/{}", host, self.cipher_uuid, self.id);
         let display_size = get_display_size(self.file_size);
@@ -41,55 +43,67 @@ impl Attachment {
             "FileName": self.file_name,
             "Size": self.file_size.to_string(),
             "SizeName": display_size,
+            "Key": self.akey,
             "Object": "attachment"
         })
     }
 }
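An aside on the new `Key` field: with the `akey` column, `to_json` now includes the attachment's encryption key in the serialized object. Roughly, the visible fields look like this; all values are invented, and fields from the elided hunk context are omitted:

    // Shape only; values are invented, elided fields omitted.
    serde_json::json!({
        "FileName": "notes.txt",
        "Size": "1024",
        "SizeName": "1 KB",
        "Key": "2.kdV8nFy...",  // self.akey; null for rows created before this change
        "Object": "attachment"
    });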
 
+use crate::db::schema::attachments;
+use crate::db::DbConn;
 use diesel;
 use diesel::prelude::*;
-use db::DbConn;
-use db::schema::attachments;
+use crate::api::EmptyResult;
+use crate::error::MapResult;
 
 /// Database methods
 impl Attachment {
-    pub fn save(&self, conn: &DbConn) -> bool {
-        match diesel::replace_into(attachments::table)
+    pub fn save(&self, conn: &DbConn) -> EmptyResult {
+        diesel::replace_into(attachments::table)
             .values(self)
-            .execute(&**conn) {
-            Ok(1) => true, // One row inserted
-            _ => false,
-        }
+            .execute(&**conn)
+            .map_res("Error saving attachment")
     }
 
-    pub fn delete(self, conn: &DbConn) -> QueryResult<()> {
-        use util;
-
-        util::delete_file(&self.get_file_path());
-
-        diesel::delete(
-            attachments::table.filter(
-                attachments::id.eq(self.id)
-            )
-        ).execute(&**conn).and(Ok(()))
+    pub fn delete(self, conn: &DbConn) -> EmptyResult {
+        crate::util::retry(
+            || diesel::delete(attachments::table.filter(attachments::id.eq(&self.id))).execute(&**conn),
+            10,
+        )
+        .map_res("Error deleting attachment")?;
+
+        crate::util::delete_file(&self.get_file_path())?;
+        Ok(())
     }
 
-    pub fn delete_all_by_cipher(cipher_uuid: &str, conn: &DbConn) -> QueryResult<()> {
-        for attachement in Attachment::find_by_cipher(&cipher_uuid, &conn) {
-            attachement.delete(&conn)?;
+    pub fn delete_all_by_cipher(cipher_uuid: &str, conn: &DbConn) -> EmptyResult {
+        for attachment in Attachment::find_by_cipher(&cipher_uuid, &conn) {
+            attachment.delete(&conn)?;
         }
         Ok(())
     }
 
     pub fn find_by_id(id: &str, conn: &DbConn) -> Option<Self> {
+        let id = id.to_lowercase();
+
         attachments::table
             .filter(attachments::id.eq(id))
-            .first::<Self>(&**conn).ok()
+            .first::<Self>(&**conn)
+            .ok()
     }
 
     pub fn find_by_cipher(cipher_uuid: &str, conn: &DbConn) -> Vec<Self> {
         attachments::table
             .filter(attachments::cipher_uuid.eq(cipher_uuid))
-            .load::<Self>(&**conn).expect("Error loading attachments")
+            .load::<Self>(&**conn)
+            .expect("Error loading attachments")
+    }
+
+    pub fn find_by_ciphers(cipher_uuids: Vec<String>, conn: &DbConn) -> Vec<Self> {
+        attachments::table
+            .filter(attachments::cipher_uuid.eq_any(cipher_uuids))
+            .load::<Self>(&**conn)
+            .expect("Error loading attachments")
     }
 }
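`delete` now routes through `crate::util::retry`, which helps when SQLite briefly reports the database as locked. The helper itself is outside this diff; a plausible sketch with the same call shape, though the real implementation may differ (for example by sleeping between attempts):

    // Sketch only: re-run `func` until it succeeds or `max_tries` is reached.
    fn retry<F, T, E>(func: F, max_tries: i32) -> Result<T, E>
    where
        F: Fn() -> Result<T, E>,
    {
        let mut tries = 0;
        loop {
            match func() {
                Ok(v) => return Ok(v),
                Err(e) => {
                    tries += 1;
                    if tries >= max_tries {
                        return Err(e); // give up and surface the last error
                    }
                }
            }
        }
    }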
|
||||||
|
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user