Compare commits
830 commits
166  .env  (new file)

@@ -0,0 +1,166 @@

```ini
# T-Pot config file. Do not remove.

###############################################
# T-Pot Base Settings - Adjust to your needs. #
###############################################

# Set Web usernames and passwords here. This section will be used to create / update the Nginx password file nginxpasswd.
# <empty>: This is the default
# <base64 encoded htpasswd usernames / passwords>:
#   Use 'htpasswd -n -b "username" "password" | base64 -w0' to create the WEB_USER if you want to manually deploy T-Pot, run 'install.sh' to automatically add a user during installation, or 'genuser.sh' if you just want to add a web user.
#   Example: 'htpasswd -n -b "tsec" "tsec" | base64 -w0' will print dHNlYzokYXByMSRYUnE2SC5rbiRVRjZQM1VVQmJVNWJUQmNmSGRuUFQxCgo=
#   Copy the string and replace WEB_USER=dHNlYzokYXByMSRYUnE2SC5rbiRVRjZQM1VVQmJVNWJUQmNmSGRuUFQxCgo=
#   Multiple users are possible:
#   WEB_USER=dHNlYzokYXByMSRYUnE2SC5rbiRVRjZQM1VVQmJVNWJUQmNmSGRuUFQxCgo= dHNlYzokYXByMSR6VUFHVWdmOCRROXI3a09CTjFjY3lCeU1DTloyanEvCgo=
WEB_USER=

# Set Logstash Web usernames and passwords here. This section will be used to create / update the Nginx password file lswebpasswd.
# The Logstash Web usernames are used for T-Pot log ingestion via Logstash, each sensor should have its own user.
# <empty>: This is empty by default.
# <'htpasswd encoded usernames / passwords'>:
#   Use 'htpasswd -n -b "username" "password" | base64 -w0' to create the LS_WEB_USER if you want to manually deploy the sensor.
#   Example: 'htpasswd -n -b "sensor" "sensor" | base64 -w0' will print c2Vuc29yOiRhcHIxJGVpMHdzUmdYJHNyWHF4UG53ZzZqWUc3aEFaUWxrWDEKCg==
#   Copy the string and replace / add LS_WEB_USER=c2Vuc29yOiRhcHIxJGVpMHdzUmdYJHNyWHF4UG53ZzZqWUc3aEFaUWxrWDEKCg==
#   Multiple users are possible:
#   LS_WEB_USER=c2Vuc29yMTokYXByMSQ5aXhNRk5yMCR6d3F2dGFwQ2x0cFBhU1pqMm9ZemYxCgo= c2Vuc29yMjokYXByMSRtYTlOS1J2NCQvU3dsVVBMeW5RaVIyM3pyWVAzOUkwCgo=
LS_WEB_USER=

# T-Pot Blackhole
# ENABLED: T-Pot will download a db of known mass scanners and nullroute them.
#          Be aware, this will put T-Pot off the map for stealth reasons and
#          you will get less traffic. Routes will be active until next reboot
#          and will be re-added with every T-Pot start until disabled.
# DISABLED: This is the default and no stealth efforts are in place.
TPOT_BLACKHOLE=DISABLED

# T-Pot Persistence
# on:  This is the default. T-Pot will keep the honeypot logfiles and rotate
#      with logrotate for 30 days.
# off: This is recommended for Raspberry Pi or setups with weaker CPUs or
#      if you just do not need any of the logfiles.
TPOT_PERSISTENCE=on

# T-Pot Type
# HIVE:   This is the default and offers everything to connect T-Pot sensors.
# SENSOR: This needs to be used when running a sensor. Be aware to adjust all other
#         settings as well.
#         1. You will need to copy compose/sensor.yml to ./docker-compose.yml
#         2. From the HIVE host you will need to copy ~/tpotce/data/nginx/cert/nginx.crt to
#            your SENSOR host to ~/tpotce/data/hive.crt
#         3. On HIVE: Create a web user per SENSOR on HIVE and provide credentials below
#            Create credentials with 'htpasswd ~/tpotce/data/nginx/conf/lswebpasswd <username>'
#         4. On SENSOR: Provide username / password from (3) for TPOT_HIVE_USER as base64 encoded string:
#            "echo -n 'username:password' | base64 -w0"
# MOBILE: This will set the correct type for T-Pot Mobile (https://github.com/telekom-security/tpotmobile)
TPOT_TYPE=HIVE

# T-Pot Hive User (only relevant for SENSOR deployment)
# <empty>: This is empty by default.
# <base64 encoded string>: Provide a base64 encoded string "echo -n 'username:password' | base64 -w0"
#                          i.e. TPOT_HIVE_USER='dXNlcm5hbWU6cGFzc3dvcmQ='
TPOT_HIVE_USER=

# Logstash Sensor SSL verification (only relevant on SENSOR hosts)
# full: This is the default. Logstash, by default, verifies the complete certificate chain for ssl certificates.
#       This also includes the FQDN and SANs. By default T-Pot will only generate a self-signed certificate which
#       contains a SAN for the HIVE IP. In scenarios where the HIVE needs to be accessed via the Internet, maybe with
#       a different NAT address, a new certificate needs to be generated before deployment that includes all the
#       IPs and FQDNs as SANs for Logstash to successfully establish a connection to the HIVE for transmitting
#       logs. Details here: https://github.com/telekom-security/tpotce?tab=readme-ov-file#distributed-deployment
# none: This setting will disable the ssl verification check of Logstash and should only be used in a testing
#       environment where IPs often change. It is not recommended for a production environment where trust between
#       HIVE and SENSOR is only established through a self-signed certificate.
LS_SSL_VERIFICATION=full

# T-Pot Hive IP (only relevant for SENSOR deployment)
# <empty>: This is empty by default.
# <IP, FQDN>: This can be either an IP (i.e. 192.168.1.1) or a FQDN (i.e. foo.bar.local)
TPOT_HIVE_IP=

# T-Pot AttackMap Text Output
# ENABLED:  This is the default and the docker container map_data will print events to the console.
# DISABLED: Printing events to the console is disabled.
TPOT_ATTACKMAP_TEXT=ENABLED

# T-Pot AttackMap Text Output Timezone
# UTC: (T-Pot default) This is usually the best option.
# Continent/City: In Linux you can check your timezone with 'readlink /etc/localtime' or
#                 see the full list here: https://en.wikipedia.org/wiki/List_of_tz_database_time_zones
# Examples: America/New_York, Asia/Taipei, Australia/Melbourne, Europe/Athens, Europe/Berlin
TPOT_ATTACKMAP_TEXT_TIMEZONE=UTC

###################################################################################
# Honeypots / Tools settings
###################################################################################
# Some services / tools offer adjustments using ENVs which can be adjusted here.
###################################################################################

# Suricata ET Pro ruleset
# OPEN: This is the default and will use the ET Open ruleset
# OINKCODE: Replace OPEN with your Oinkcode to use the ET Pro ruleset
OINKCODE=OPEN

# Beelzebub Honeypot supports LLMs such as ChatGPT and the Ollama backend.
# Beelzebub is not part of the standard edition, please follow the README regarding setup.
# It is recommended to use the Ollama backend to keep costs at bay.
# Remember to rate limit API usage / set budget alerts when using the ChatGPT API.
# BEELZEBUB_LLM_MODEL: Set to "ollama" or "gpt4-o".
# BEELZEBUB_LLM_HOST: When using "ollama" set it to the URL of your Ollama backend.
# BEELZEBUB_OLLAMA_MODEL: Set to the model you are serving on your Ollama backend, i.e. "openchat".
# BEELZEBUB_LLM_MODEL: "gpt4-o"
# BEELZEBUB_OPENAISECRETKEY: "sk-proj-123456"
BEELZEBUB_LLM_MODEL: "ollama"
BEELZEBUB_LLM_HOST: "http://ollama.local:11434/api/chat"
BEELZEBUB_OLLAMA_MODEL: "openchat"

# Galah is a LLM-powered web honeypot supporting various LLM backends.
# Galah is not part of the standard edition, please follow the README regarding setup.
# It is recommended to use the Ollama backend to keep costs at bay.
# Remember to rate limit API usage / set budget alerts when using the ChatGPT API.
# GALAH_LLM_PROVIDER: Set to "ollama" or "gpt4-o".
# GALAH_LLM_SERVER_URL: When using "ollama" set it to the URL of your Ollama backend.
# GALAH_LLM_MODEL: Set to the model you are serving on your Ollama backend, i.e. "llama3".
# GALAH_LLM_TEMPERATURE: "1"
# GALAH_LLM_API_KEY: "sk-XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
# GALAH_LLM_CLOUD_LOCATION: ""
# GALAH_LLM_CLOUD_PROJECT: ""
GALAH_LLM_PROVIDER: "ollama"
GALAH_LLM_SERVER_URL: "http://ollama.local:11434"
GALAH_LLM_MODEL: "llama3.1"


###################################################################################
# NEVER MAKE CHANGES TO THIS SECTION UNLESS YOU REALLY KNOW WHAT YOU ARE DOING!!! #
###################################################################################

# docker.sock Path
TPOT_DOCKER_SOCK=/var/run/docker.sock

# docker compose .env
TPOT_DOCKER_ENV=./.env

# Docker-Compose file
TPOT_DOCKER_COMPOSE=./docker-compose.yml

# T-Pot Docker Repo
# Depending on where you are located you may choose between DockerHub and GHCR
# dtagdevsec: This will use the DockerHub image registry
# ghcr.io/telekom-security: This will use the GitHub container registry
TPOT_REPO=ghcr.io/telekom-security

# T-Pot Version Tag
TPOT_VERSION=24.04.1

# T-Pot Pull Policy
# always: (T-Pot default) Compose implementations SHOULD always pull the image from the registry.
# never: Compose implementations SHOULD NOT pull the image from a registry and SHOULD rely on the platform cached image.
# missing: Compose implementations SHOULD pull the image only if it's not available in the platform cache.
# build: Compose implementations SHOULD build the image. Compose implementations SHOULD rebuild the image if already present.
TPOT_PULL_POLICY=always

# T-Pot Data Path
TPOT_DATA_PATH=./data

# OSType (linux, mac, win)
# Most docker features are available on linux
TPOT_OSTYPE=linux
```
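All three credential variables above are produced the same way: `htpasswd` output (or a plain `username:password` string) wrapped in base64, exactly as the file's own comments describe. A minimal sketch; the usernames and passwords are placeholder values:

```bash
#!/usr/bin/env bash
# Sketch: generate the credential strings referenced in .env above.
# "webuser"/"webpass" and "sensor01"/"sensorpass" are placeholders.

# WEB_USER / LS_WEB_USER: one htpasswd entry, base64-encoded on a single line
WEB_USER="$(htpasswd -n -b "webuser" "webpass" | base64 -w0)"
LS_WEB_USER="$(htpasswd -n -b "sensor01" "sensorpass" | base64 -w0)"

# TPOT_HIVE_USER (SENSOR deployments only): plain 'username:password', base64-encoded
TPOT_HIVE_USER="$(echo -n 'sensor01:sensorpass' | base64 -w0)"

echo "WEB_USER=${WEB_USER}"
echo "LS_WEB_USER=${LS_WEB_USER}"
echo "TPOT_HIVE_USER=${TPOT_HIVE_USER}"
```

The printed lines can be pasted straight into `.env`; `htpasswd` ships with `apache2-utils` on Debian-based systems.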
44  .github/ISSUE_TEMPLATE/bug-report-for-t-pot.md  (vendored)

```diff
@@ -1,37 +1,43 @@
 ---
-name: Bug report for T-Pot
-about: Bug report for T-Pot
+name: Bug report for T-Pot 24.04.x
+about: Bug report for T-Pot 24.04.x
 title: ''
 labels: ''
 assignees: ''

 ---

-Before you post your issue make sure it has not been answered yet and provide `basic support information` if you come to the conclusion it is a new issue.
+# Successfully raise an issue
+Before you post your issue make sure it has not been answered yet and provide **⚠️ BASIC SUPPORT INFORMATION** (as requested below) if you come to the conclusion it is a new issue.

 - 🔍 Use the [search function](https://github.com/dtag-dev-sec/tpotce/issues?utf8=%E2%9C%93&q=) first
-- 🧐 Check our [WIKI](https://github.com/dtag-dev-sec/tpotce/wiki)
-- 📚 Consult the documentation of 💻 [Debian](https://www.debian.org/doc/), 🐳 [Docker](https://docs.docker.com/), the 🦌 [ELK stack](https://www.elastic.co/guide/index.html) and the 🍯 [T-Pot Readme](https://github.com/dtag-dev-sec/tpotce/blob/master/README.md).
-- **⚠️ Provide [basic support information](#info) or similiar information with regard to your issue or we can not help you and will close the issue without further notice**
+- 🧐 Check our [Wiki](https://github.com/dtag-dev-sec/tpotce/wiki) and the [discussions](https://github.com/telekom-security/tpotce/discussions)
+- 📚 Consult the documentation of 💻 your Linux OS, 🐳 [Docker](https://docs.docker.com/), the 🦌 [Elastic stack](https://www.elastic.co/guide/index.html) and the 🍯 [T-Pot Readme](https://github.com/dtag-dev-sec/tpotce/blob/master/README.md).
+- ⚙️ The [Troubleshoot Section](https://github.com/telekom-security/tpotce?tab=readme-ov-file#troubleshooting) of the [T-Pot Readme](https://github.com/dtag-dev-sec/tpotce/blob/master/README.md) is a good starting point to collect a good set of information for the issue and / or to fix things on your own.
+- **⚠️ Provide [BASIC SUPPORT INFORMATION](#-basic-support-information-commands-are-expected-to-run-as-root) or similar detailed information with regard to your issue or we will close the issue or convert it into a discussion without further interaction from the maintainers**.<br>

-<a name="info"></a>
-## ⚠️ Basic support information (commands are expected to run as `root`)
+<br>
+<br>
+<br>
+# ⚠️ Basic support information (commands are expected to run as `root`)
+**We happily take the time to improve T-Pot and take care of things, but we need you to take the time to create an issue that provides us with all the information we need.**

-- What version of the OS are you currently using `lsb_release -a` and `uname -a`?
-- What T-Pot version are you currently using?
-- What edition (Standard, Nextgen, etc.) of T-Pot are you running?
+- What OS are you T-Pot running on?
+- What is the version of the OS `lsb_release -a` and `uname -a`?
+- What T-Pot version are you currently using (only **T-Pot 24.04.x** is currently supported)?
 - What architecture are you running on (i.e. hardware, cloud, VM, etc.)?
-- Did you have any problems during the install? If yes, please attach `/install.log` `/install.err`.
+- Review the `~/install_tpot.log`, attach the log and highlight the errors.
 - How long has your installation been running?
+  - If it is a fresh install consult the documentation first.
+  - Most likely it is a port conflict or a remote dependency was unavailable.
+  - Retry a fresh installation and only open the issue if the error keeps coming up and is not resolved using the documentation as described [here](#how-to-raise-an-issue).
 - Did you install upgrades, packages or use the update script?
 - Did you modify any scripts or configs? If yes, please attach the changes.
-- Please provide a screenshot of `glances` and `htop`.
+- Please provide a screenshot of `htop` and `docker stats`.
 - How much free disk space is available (`df -h`)?
-- What is the current container status (`dps.sh`)?
-- What is the status of the T-Pot service (`systemctl status tpot`)?
-- What ports are being occupied? Stop T-Pot `systemctl stop tpot` and run `netstat -tulpen`
+- What is the current container status (`dps`)?
+- On Linux: What is the status of the T-Pot service (`systemctl status tpot`)?
+- What ports are being occupied?
+  - Stop T-Pot `systemctl stop tpot`
+  - Run `grc netstat -tulpen`
+  - Run T-Pot manually with `docker compose -f ~/tpotce/docker-compose.yml up` and check for errors
+  - Stop execution with `CTRL-C` and `docker compose -f ~/tpotce/docker-compose.yml down -v`
+  - If a single container shows as `DOWN` you can run `docker logs <container-name>` for the latest log entries
```
.github/ISSUE_TEMPLATE/feature-request-for-t-pot.md  (vendored)

```diff
@@ -1,6 +1,6 @@
 ---
-name: Feature request for T-Pot
-about: Suggest an idea for T-Pot
+name: Feature request for T-Pot 24.04.x
+about: Suggest an idea for T-Pot 24.04.x
 title: ''
 labels: ''
 assignees: ''
```
.github/ISSUE_TEMPLATE/general-issue-for-t-pot.md  (vendored)

```diff
@@ -1,37 +1,43 @@
 ---
-name: General issue for T-Pot
-about: General issue for T-Pot
+name: General issue for T-Pot 24.04.x
+about: General issue for T-Pot 24.04.x
 title: ''
 labels: ''
 assignees: ''

 ---

-Before you post your issue make sure it has not been answered yet and provide `basic support information` if you come to the conclusion it is a new issue.
+# Successfully raise an issue
+Before you post your issue make sure it has not been answered yet and provide **⚠️ BASIC SUPPORT INFORMATION** (as requested below) if you come to the conclusion it is a new issue.

 - 🔍 Use the [search function](https://github.com/dtag-dev-sec/tpotce/issues?utf8=%E2%9C%93&q=) first
-- 🧐 Check our [WIKI](https://github.com/dtag-dev-sec/tpotce/wiki)
-- 📚 Consult the documentation of 💻 [Debian](https://www.debian.org/doc/), 🐳 [Docker](https://docs.docker.com/), the 🦌 [ELK stack](https://www.elastic.co/guide/index.html) and the 🍯 [T-Pot Readme](https://github.com/dtag-dev-sec/tpotce/blob/master/README.md).
-- **⚠️ Provide [basic support information](#info) or similiar information with regard to your issue or we can not help you and will close the issue without further notice**
+- 🧐 Check our [Wiki](https://github.com/dtag-dev-sec/tpotce/wiki) and the [discussions](https://github.com/telekom-security/tpotce/discussions)
+- 📚 Consult the documentation of 💻 your Linux OS, 🐳 [Docker](https://docs.docker.com/), the 🦌 [Elastic stack](https://www.elastic.co/guide/index.html) and the 🍯 [T-Pot Readme](https://github.com/dtag-dev-sec/tpotce/blob/master/README.md).
+- ⚙️ The [Troubleshoot Section](https://github.com/telekom-security/tpotce?tab=readme-ov-file#troubleshooting) of the [T-Pot Readme](https://github.com/dtag-dev-sec/tpotce/blob/master/README.md) is a good starting point to collect a good set of information for the issue and / or to fix things on your own.
+- **⚠️ Provide [BASIC SUPPORT INFORMATION](#-basic-support-information-commands-are-expected-to-run-as-root) or similar detailed information with regard to your issue or we will close the issue or convert it into a discussion without further interaction from the maintainers**.<br>

-<a name="info"></a>
-## ⚠️ Basic support information (commands are expected to run as `root`)
+<br>
+<br>
+<br>
+# ⚠️ Basic support information (commands are expected to run as `root`)
+**We happily take the time to improve T-Pot and take care of things, but we need you to take the time to create an issue that provides us with all the information we need.**

-- What version of the OS are you currently using `lsb_release -a` and `uname -a`?
-- What T-Pot version are you currently using?
-- What edition (Standard, Nextgen, etc.) of T-Pot are you running?
+- What OS are you T-Pot running on?
+- What is the version of the OS `lsb_release -a` and `uname -a`?
+- What T-Pot version are you currently using (only **T-Pot 24.04.x** is currently supported)?
 - What architecture are you running on (i.e. hardware, cloud, VM, etc.)?
-- Did you have any problems during the install? If yes, please attach `/install.log` `/install.err`.
+- Review the `~/install_tpot.log`, attach the log and highlight the errors.
 - How long has your installation been running?
+  - If it is a fresh install consult the documentation first.
+  - Most likely it is a port conflict or a remote dependency was unavailable.
+  - Retry a fresh installation and only open the issue if the error keeps coming up and is not resolved using the documentation as described [here](#how-to-raise-an-issue).
 - Did you install upgrades, packages or use the update script?
 - Did you modify any scripts or configs? If yes, please attach the changes.
-- Please provide a screenshot of `glances` and `htop`.
+- Please provide a screenshot of `htop` and `docker stats`.
 - How much free disk space is available (`df -h`)?
-- What is the current container status (`dps.sh`)?
-- What is the status of the T-Pot service (`systemctl status tpot`)?
-- What ports are being occupied? Stop T-Pot `systemctl stop tpot` and run `netstat -tulpen`
+- What is the current container status (`dps`)?
+- On Linux: What is the status of the T-Pot service (`systemctl status tpot`)?
+- What ports are being occupied?
+  - Stop T-Pot `systemctl stop tpot`
+  - Run `grc netstat -tulpen`
+  - Run T-Pot manually with `docker compose -f ~/tpotce/docker-compose.yml up` and check for errors
+  - Stop execution with `CTRL-C` and `docker compose -f ~/tpotce/docker-compose.yml down -v`
+  - If a single container shows as `DOWN` you can run `docker logs <container-name>` for the latest log entries
```
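Most of the diagnostics the templates above ask for are non-interactive one-liners, so they can be gathered in a single pass before opening an issue. A hedged sketch using only commands the templates themselves name; `support-info.txt` is an arbitrary output file, and `dps` / `grc` are provided by the T-Pot installation:

```bash
#!/usr/bin/env bash
# Sketch: collect the basic support information the issue templates request.
# Expected to run as root on the T-Pot host; support-info.txt is a made-up name.
{
  echo "== OS / kernel =="
  lsb_release -a
  uname -a
  echo "== Free disk space =="
  df -h
  echo "== Container status =="
  dps                               # dps is a T-Pot alias; run interactively if not found
  echo "== T-Pot service =="
  systemctl status tpot --no-pager
  echo "== Resource snapshot =="
  docker stats --no-stream
  echo "== Occupied ports =="
  grc netstat -tulpen               # the templates suggest 'systemctl stop tpot' first
} >support-info.txt 2>&1
```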
49  .github/workflows/basic-support-info.yml  (vendored, new file)

@@ -0,0 +1,49 @@

```yaml
name: "Check Basic Support Info"

on:
  issues:
    types: [opened, edited]

permissions:
  issues: write
  contents: read

jobs:
  check-issue:
    runs-on: ubuntu-latest

    steps:
      - name: Check out the repository
        uses: actions/checkout@v4

      - name: Install jq
        run: sudo apt-get install jq -y

      - name: Check issue for basic support info
        id: check_issue
        run: |
          REQUIRED_INFO=("What OS are you T-Pot running on?" "What is the version of the OS" "What T-Pot version are you currently using" "What architecture are you running on" "Review the \`~/install_tpot.log\`" "How long has your installation been running?" "Did you install upgrades, packages or use the update script?" "Did you modify any scripts or configs?" "Please provide a screenshot of \`htop\` and \`docker stats\`." "How much free disk space is available" "What is the current container status" "What is the status of the T-Pot service" "What ports are being occupied?")

          ISSUE_BODY=$(cat $GITHUB_EVENT_PATH | jq -r '.issue.body')
          MISSING_INFO=()

          for info in "${REQUIRED_INFO[@]}"; do
            if [[ "$ISSUE_BODY" != *"$info"* ]]; then
              MISSING_INFO+=("$info")
            fi
          done

          if [ ${#MISSING_INFO[@]} -ne 0 ]; then
            echo "missing=true" >> $GITHUB_ENV
          else
            echo "missing=false" >> $GITHUB_ENV
          fi

      - name: Add "no basic support info" label if necessary
        if: env.missing == 'true'
        run: gh issue edit "$NUMBER" --add-label "$LABELS"
        env:
          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          GH_REPO: ${{ github.repository }}
          NUMBER: ${{ github.event.issue.number }}
          LABELS: no basic support info
```
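Because the check is a plain substring match over `jq -r '.issue.body'`, it can be dry-run locally without GitHub. A small sketch; `sample_event.json` is a hypothetical stand-in for `$GITHUB_EVENT_PATH`, and the issue text is made up:

```bash
#!/usr/bin/env bash
# Sketch: replay the workflow's jq extraction against a sample event payload.
cat > sample_event.json <<'EOF'
{"issue": {"number": 1, "body": "What OS are you T-Pot running on?\nDebian 12"}}
EOF

ISSUE_BODY=$(jq -r '.issue.body' sample_event.json)
if [[ "$ISSUE_BODY" == *"What OS are you T-Pot running on?"* ]]; then
  echo "required phrase found"
else
  echo "required phrase missing"
fi
```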
24  .github/workflows/stale.yml  (vendored, new file)

@@ -0,0 +1,24 @@

```yaml
name: "Tag stale issues and pull requests"

on:
  schedule:
    - cron: "0 0 * * *" # Runs every day at midnight
  workflow_dispatch: # Allows the workflow to be triggered manually

jobs:
  stale:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/stale@v7
        with:
          repo-token: ${{ secrets.GITHUB_TOKEN }}
          stale-issue-message: "This issue has been marked as stale because it has had no activity for 7 days. If you are still experiencing this issue, please comment or it will be closed in 7 days."
          stale-pr-message: "This pull request has been marked as stale because it has had no activity for 7 days. If you are still working on this, please comment or it will be closed in 7 days."
          days-before-stale: 7
          days-before-close: 7
          stale-issue-label: "stale"
          exempt-issue-labels: "keep-open"
          stale-pr-label: "stale"
          exempt-pr-labels: "keep-open"
          operations-per-run: 30
          debug-only: false
```
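Since the workflow also declares `workflow_dispatch`, a run can be triggered by hand instead of waiting for the nightly schedule. One way to do that, assuming the GitHub CLI (`gh`) is installed and authenticated:

```bash
# Manually trigger the stale workflow via workflow_dispatch
gh workflow run stale.yml --repo telekom-security/tpotce
```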
6  .gitignore  (vendored, new file)

@@ -0,0 +1,6 @@

```
# Ignore data folder
data/
_data/
**/.DS_Store
.idea
install_tpot.log
```
270
CHANGELOG.md
|
@ -1,236 +1,46 @@
|
|||
# Changelog
|
||||
# Release Notes / Changelog
|
||||
T-Pot 24.04.1 brings significant updates and exciting new honeypot additions, especially the LLM-based honeypots **Beelzebub** and **Galah**!
|
||||
|
||||
## 20200630
|
||||
- **Release T-Pot 20.06**
|
||||
- After 4 months of public testing with the NextGen edition T-Pot 20.06 can finally be released.
|
||||
- **Debian Buster**
|
||||
- With the release of Debian Buster T-Pot now has access to all packages required right out of the box.
|
||||
- **Add new honeypots**
|
||||
- [Dicompot](https://github.com/nsmfoo/dicompot) by @nsmfoo is a low interaction honeypot for the Dicom protocol which is the international standard to process medical imaging information. Together with Medpot which supports the HL7 protocol T-Pot is now offering a Medical Installation type.
|
||||
- [Honeysap](https://github.com/SecureAuthCorp/HoneySAP) by SecureAuthCorp is a low interaction honeypot for the SAP services, in case of T-Pot configured for the SAP router.
|
||||
- [Elasticpot](https://gitlab.com/bontchev/elasticpot) by Vesselin Bontchev replaces ElasticpotPY as a low interaction honeypot for Elasticsearch with more features, plugins and scripted responses.
|
||||
- **Rebuild Images**
|
||||
- All docker images were rebuilt based on the latest (and stable running) versions of the tools and honeypots. Mostly the images now run on Alpine 3.12 / Debian Buster. However some honeypots / tools still reuire Alpine 3.11 / 3.10 to run properly.
|
||||
- **Install Types**
|
||||
- All docker-compose files (`/opt/tpot/etc/compose`) were remixed and most of the NextGen honeypots are now available in Standard.
|
||||
- There is now a **Medical** Installation Type with Dicompot and Medpot which will be of most interest for medical institutions to get started with T-Pot.
|
||||
- **Update Tools**
|
||||
- Connecting to T-Pot via `https://<ip>:64297` brings you to the T-Pot Landing Page now which is based on Heimdall and the latest NGINX enforcing TLS 1.3.
|
||||
- The ELK stack was updated to 7.8.0 and stripped down to the necessary core functions (where possible) for T-Pot while keeping ELK RAM requirements to a minimum (8GB of RAM is recommended now). The number of index pattern fields was reduced to **697** which increases performance significantly. There are **22** Kibana Dashboards, **397** Kibana Visualizations and **24** Kibana Searches readily available to cover all your needs to get started and familiar with T-Pot.
|
||||
- Cyberchef was updated to 9.21.0.
|
||||
- Elasticsearch Head was updated to the latest version available on GitHub.
|
||||
- Spiderfoot was updated to latest 3.1 dev.
|
||||
- **Landing Page**
|
||||
- After logging into T-Pot via web you are now greeted with a beautifully designed landing page.
|
||||
- **Countless Tweaks and improvements**
|
||||
- Under the hood lots of tiny tweaks, improvements and a few bugfixes will increase your overall experience with T-Pot.
|
||||
## New Features
|
||||
* **Beelzebub** (SSH) and **Galah** (HTTP) are the first LLM-based honeypots included in T-Pot (requires Ollama installation or a ChatGPT subscription).
|
||||
* **Go-Pot** a HTTP tarpit designed to maximize bot misery by slowly feeding them an infinite stream of fake secrets.
|
||||
* **Honeyaml** a configurable API server honeypot even supporting JWT-based HTTP bearer/token authentication.
|
||||
* **H0neytr4p** a HTTP/S honeypot capable of emulating vulnerabilities using configurable traps.
|
||||
* **Miniprint** a medium-interaction printer honeypot.
|
||||
|
||||
## 20200316
|
||||
- **Move from Sid to Stable**
|
||||
- Debian Stable has now all the packages and versions we need for T-Pot. As a consequence we can now move to the `stable` branch.
|
||||
## Updates
|
||||
* **Honeypots** were updated to their latest pushed code and / or releases.
|
||||
* **Editions** have been re-introduced. You can now additionally choose to install T-Pot as **Mini**, **LLM** and **Tarpit** edition.
|
||||
* **Attack Map** has been updated to 2.2.6 including support for all new honeypots.
|
||||
* **Elastic Stack** has been upgrade to 8.16.1.
|
||||
* **Cyberchef** has been updated to the latest release.
|
||||
* **Elasticvue** has been updated to 1.1.0.
|
||||
* **Suricata** has been updated to 7.0.7, now supporting JA4 hashes.
|
||||
* Most honeypots now use **PyInstaller** (for Python) and **Scratch** (for Go) to minimize Docker image sizes.
|
||||
* All new honeypots have been integrated with **Kibana**, featuring dedicated dashboards and visualizations.
|
||||
* **Github Container Registry** is now the default container registry for the T-Pot configuration file `.env`.
|
||||
* Compatibility tested with **Alma 9.5**, **Fedora 41**, **Rocky 9.5**, and **Ubuntu 24.04.1**, with updated supported ISO links.
|
||||
* Docker images now use **Alpine 3.20** or **Scratch** wherever possible.
|
||||
* Updates for `24.04.1` images will be provided continuously through Docker image updates.
|
||||
* **Ddospot** has been moved from the Hive / Sensor installation to the Tarpit installation.
|
||||
|
||||
## 20200310
|
||||
- **Add 2FA to Cockpit**
|
||||
- Just run `2fa.sh` to enable two factor authentication in Cockpit.
|
||||
- **Find fastest mirror with netselect-apt**
|
||||
- Netselect-apt will find the fastest mirror close to you (outgoing ICMP required).
|
||||
## Breaking Changes
|
||||
### NGINX
|
||||
- The container no longer runs in host mode, requiring changes to the `docker-compose.yml` and related services.
|
||||
- To avoid confusion and downtime, the `24.04.1` tag for Docker images has been introduced.
|
||||
- **Important**: Actively update T-Pot as described in the [README](https://github.com/telekom-security/tpotce/blob/master/README.md).
|
||||
- **Deprecation Notice**: The `24.04` tagged images will no longer be maintained and will be removed by **2025-01-31**.
|
||||
|
||||
## 20200309
|
||||
- **Bump Nextgen to 20.06**
|
||||
- All NextGen images have been rebuilt to their latest master.
|
||||
- ElasticStack bumped to 7.6.1 (Elasticsearch will need at least 2048MB of RAM now, T-Pot at least 8GB of RAM) and tweak to accomodate changes of 7.x.
|
||||
- Fixed errors in Tanner / Snare which will now handle downloads of malware via SSL and store them correctly (thanks to @afeena).
|
||||
- Fixed errors in Heralding which will now improve on RDP connections (thanks to @johnnykv, @realsdx).
|
||||
- Fixed error in honeytrap which will now build in Debian/Buster (thanks to @tillmannw).
|
||||
- Mailoney is now logging in JSON format (thanks to @monsherko).
|
||||
- Base T-Pot landing page on Heimdall.
|
||||
- Tweaking of tools and some minor bug fixing
|
||||
### Suricata
|
||||
- Capture filters have been updated to exclude broadcast, multicast, NetBIOS, IGMP, and MDNS traffic.
|
||||
|
||||
## 20200116
|
||||
- **Bump ELK to latest 6.8.6**
|
||||
- **Update ISO image to fix upstream bug of missing kernel modules**
|
||||
- **Include dashboards for CitrixHoneypot**
|
||||
- Please run `/opt/tpot/update.sh` for the necessary modifications, omit the reboot and run `/opt/tpot/bin/tped.sh` to (re-)select the NextGen installation type.
|
||||
- This update requires the latest Kibana objects as well. Download the latest from https://raw.githubusercontent.com/dtag-dev-sec/tpotce/master/etc/objects/kibana_export.json.zip, unzip and import the objects within Kibana WebUI > Management > Saved Objects > Export / Import". All objects will be overwritten upon import, make sure to run an export first.
|
||||
## Thanks & Credits
|
||||
A heartfelt thank you to the contributors who made this release possible:
|
||||
* @elivlo, @mancasa, koalafiedTroll, @trixam, for their backend and ews support!
|
||||
* @mariocandela for his work and updates on Beelzebub based on our discussions!
|
||||
* @ryanolee for approaching us and adding valuable features to go-pot based on our discussions!
|
||||
* @neon-ninja for the work on #1661!
|
||||
* @sarkoziadam for the work on #1643!
|
||||
* @glaslos for the work on #1538!
|
||||
|
||||
## 20200115
|
||||
- **Prepare integration of CitrixHoneypot**
|
||||
- Prepare integration of [CitrixHoneypot](https://github.com/MalwareTech/CitrixHoneypot) by MalwareTech
|
||||
- Integration into ELK is still open
|
||||
- Please run `/opt/tpot/update.sh` for the necessary modifications, omit the reboot and run `/opt/tpot/bin/tped.sh` to (re-)select the NextGen installation type.
|
||||
|
||||
## 20191224
|
||||
- **Use pigz, optimize logrotate.conf**
|
||||
- Use `pigz` for faster archiving, especially with regard to high volumes of logs - Thanks to @workandresearchgithub!
|
||||
- Optimize `logrotate.conf` to improve archiving speed and get rid of multiple compression, also introduce `pigz`.
|
||||
|
||||
## 20191121
|
||||
- **Bump ADBHoney to latest master**
|
||||
- Use latest version of ADBHoney, which now fully support Python 3.x - Thanks to @huuck!
|
||||
|
||||
## 20191113, 20191104, 20191103, 20191028
|
||||
- **Switch to Debian 10 on OTC, Ansible Improvements**
|
||||
- OTC now supporting Debian 10 - Thanks to @shaderecker!
|
||||
|
||||
## 20191028
|
||||
- **Fix an issue with pip3, yq**
|
||||
- `yq` needs rehashing.
|
||||
|
||||
## 20191026
|
||||
- **Remove cockpit-pcp**
|
||||
- `cockpit-pcp` floods swap for some reason - removing for now.
|
||||
|
||||
## 20191022
|
||||
- **Bump Suricata to 5.0.0**
|
||||
|
||||
## 20191021
|
||||
- **Bump Cowrie to 2.0.0**
|
||||
|
||||
## 20191016
|
||||
- **Tweak installer, pip3, Heralding**
|
||||
- Install `cockpit-pcp` right from the start for machine monitoring in cockpit.
|
||||
- Move installer and update script to use pip3.
|
||||
- Bump heralding to latest master (1.0.6) - Thanks @johnnykv!
|
||||
|
||||
## 20191015
|
||||
- **Tweaking, Bump glutton, unlock ES script**
|
||||
- Add `unlock.sh` to unlock ES indices in case of lockdown after disk quota has been reached.
|
||||
- Prevent too much terminal logging from p0f and glutton since `daemon.log` was filled up.
|
||||
- Bump glutton to latest master now supporting payload_hex. Thanks to @glaslos.
|
||||
|
||||
## 20191002
|
||||
- **Merge**
|
||||
- Support Debian Buster images for AWS #454
|
||||
- Thank you @piffey
|
||||
|
||||
## 20190924
|
||||
- **Bump EWSPoster**
|
||||
- Supports Python 3.x
|
||||
- Thank you @Trixam
|
||||
|
||||
## 20190919
|
||||
- **Merge**
|
||||
- Handle non-interactive shells #454
|
||||
- Thank you @Oogy
|
||||
|
||||
## 20190907
|
||||
- **Logo tweaking**
|
||||
- Add QR logo
|
||||
|
||||
## 20190828
|
||||
- **Upgrades and rebuilds**
|
||||
- Bump Medpot, Nginx and Adbhoney to latest master
|
||||
- Bump ELK stack to 6.8.2
|
||||
- Rebuild Mailoney, Honeytrap, Elasticpot and Ciscoasa
|
||||
- Add 1080p T-Pot wallpaper for download
|
||||
|
||||
## 20190824
|
||||
- **Add some logo work**
|
||||
- Thanks to @thehadilps's suggestion adjusted social preview
|
||||
- Added 4k T-Pot wallpaper for download
|
||||
|
||||
## 20190823
|
||||
- **Fix for broken Fuse package**
|
||||
- Fuse package in upstream is broken
|
||||
- Adjust installer as workaround, fixes #442
|
||||
|
||||
## 20190816
|
||||
- **Upgrades and rebuilds**
|
||||
- Adjust Dionaea to avoid nmap detection, fixes #435 (thanks @iukea1)
|
||||
- Bump Tanner, Cyberchef, Spiderfoot and ES Head to latest master
|
||||
|
||||
## 20190815
|
||||
- **Bump ELK stack to 6.7.2**
|
||||
- Transition to 7.x must iterate slowly through previous versions to prevent changes breaking T-Pots
|
||||
|
||||
## 20190814
|
||||
- **Logstash Translation Maps improvement**
|
||||
- Download translation maps rather than running a git pull
|
||||
- Translation maps will now be bzip2 compressed to reduce traffic to a minimum
|
||||
- Fixes #432
|
||||
|
||||
## 20190802
|
||||
- **Add support for Buster as base image**
|
||||
- Install ISO is now based on Debian Buster
|
||||
- Installation upon Debian Buster is now supported
|
||||
|
||||
## 20190701
|
||||
- **Reworked Ansible T-Pot Deployment**
|
||||
- Transitioned from bash script to all Ansible
|
||||
- Reusable Ansible Playbook for OpenStack clouds
|
||||
- Example Showcase with our Open Telekom Cloud
|
||||
- Adaptable for other cloud providers
|
||||
|
||||
## 20190626
|
||||
- **HPFEEDS Opt-In commandline option**
|
||||
- Pass a hpfeeds config file as a commandline argument
|
||||
- hpfeeds config is saved in `/data/ews/conf/hpfeeds.cfg`
|
||||
- Update script restores hpfeeds config
|
||||
|
||||
## 20190604
|
||||
- **Finalize Fatt support**
|
||||
- Build visualizations, searches, dashboards
|
||||
- Rebuild index patterns
|
||||
- Some finishing touches
|
||||
|
||||
## 20190601
|
||||
- **Start supporting Fatt, remove Glastopf**
|
||||
- Build Dockerfile, Adjust logstash, installer, update and such.
|
||||
- Glastopf is no longer supported within T-Pot
|
||||
|
||||
## 20190528+20190531
|
||||
- **Increase total number of fields**
|
||||
- Adjust total number of fileds for logstash templae from 1000 to 2000.
|
||||
|
||||
## 20190526
|
||||
- **Fix build for Cowrie**
|
||||
- Upstream changes required a new package `py-bcrypt`.
|
||||
|
||||
## 20190525
|
||||
- **Fix build for RDPY**
|
||||
- Building was prevented by a cache error that has recently been occurring on Alpine when `apk` is invoked with the `--no-cache` option.
|
||||
|
||||
## 20190520
|
||||
- **Adjust permissions for /data folder**
|
||||
- Now it is possible to download files from `/data` using SCP, WinSCP or Cyberduck.
|
||||
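  For example, with T-Pot's SSH listening on port 64295 (target host and file path are placeholders):
```
scp -P 64295 tsec@<your.tpot.host>:/data/cowrie/log/cowrie.json .
```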
|
||||
## 20190513
|
||||
- **Added Ansible T-Pot Deployment on Open Telekom Cloud**
|
||||
- Reusable Ansible Playbooks for all cloud providers
|
||||
- Example Showcase with our Open Telekom Cloud
|
||||
|
||||
## 20190511
|
||||
- **Add hptest script**
|
||||
- Quickly test if the honeypots are working with the nmap-based `hptest.sh <[ip,host]>`.
|
||||
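  A usage sketch with a placeholder target:
```
hptest.sh 192.168.1.100
```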
|
||||
## 20190508
|
||||
- **Add tsec / install user to tpot group**
|
||||
- So that users can easily download logs from the `/data` folder, the installer now adds `tsec` or the logged-in user (`who am i`) to the `tpot` group via `usermod -a -G tpot <user>`. Also, `/data` permissions will now be enforced to `770`, which is necessary for directory listings.
|
||||
|
||||
## 20190502
|
||||
- **Fix KVPs**
|
||||
- Some KVPs for Cowrie changed and the tagcloud was not showing any values in the Cowrie dashboard.
|
||||
- New installations are not affected; however, existing installations need to import the objects from `/opt/tpot/etc/objects/kibana-objects.json.zip`.
|
||||
- **Makeiso**
|
||||
- Move to Xorriso for building the ISO image.
|
||||
- This allows supporting most Debian-based distros, i.e. Debian, MxLinux and Ubuntu.
|
||||
|
||||
## 20190428
|
||||
- **Rebuild ISO**
|
||||
- The install ISO needed a rebuild after some changes in the Debian mirrors.
|
||||
- **Disable Netselect**
|
||||
- After reports in the issues that some Debian mirrors were not fully synced, and thus some packages were unavailable, the netselect-apt feature was disabled.
|
||||
|
||||
## 20190406
|
||||
- **Fix for SSH**
|
||||
- In some situations the SSH Port was not written to a new line (thanks to @dpisano for reporting).
|
||||
- **Fix race condition for apt-fast**
|
||||
- Curl and wget need to be installed before apt-fast installation.
|
||||
|
||||
## 20190404
|
||||
- **Fix #332**
|
||||
- If T-Pot, contrary to the requirements, does not have full internet access, netselect-apt fails to determine the fastest mirror, as it needs outgoing ICMP and UDP. Should netselect-apt fail, the default mirrors will be used.
|
||||
- **Improve install speed with apt-fast**
|
||||
- Migrating from a stable base install to Debian (Sid) requires downloading lots of packages. Depending on your geolocation, download speed was already improved by introducing netselect-apt to determine the fastest mirror. With apt-fast, downloads become even faster: packages are fetched not only in parallel but also over multiple connections per package.
|
||||
|
||||
`git log --date=format:"## %Y%m%d" --pretty=format:"%ad %n- **%s**%n - %b"`
|
||||
… and to the entire T-Pot community for opening issues, sharing ideas, and helping improve T-Pot!
|
||||
|
|
43
CITATION.cff
Normal file
|
@ -0,0 +1,43 @@
|
|||
# This CITATION.cff file was generated with cffinit.
|
||||
# Visit https://bit.ly/cffinit to generate yours today!
|
||||
|
||||
cff-version: 1.2.0
|
||||
title: T-Pot 24.04.1
|
||||
message: >-
|
||||
If you use this software, please cite it using the
|
||||
metadata from this file.
|
||||
type: software
|
||||
authors:
|
||||
- name: Deutsche Telekom Security GmbH
|
||||
address: Bonner Talweg 100
|
||||
city: Bonn
|
||||
country: DE
|
||||
post-code: '53113'
|
||||
website: 'https://github.com/telekom-security'
|
||||
- given-names: Marco
|
||||
family-names: Ochse
|
||||
affiliation: Deutsche Telekom Security GmbH
|
||||
identifiers:
|
||||
- type: url
|
||||
value: >-
|
||||
https://github.com/telekom-security/tpotce/releases/tag/24.04.1
|
||||
description: T-Pot Release 24.04.1
|
||||
repository-code: 'https://github.com/telekom-security/tpotce'
|
||||
abstract: >-
|
||||
T-Pot is the all in one, optionally distributed, multiarch
|
||||
(amd64, arm64) honeypot platform, supporting 20+
|
||||
honeypots and countless visualization options using the
|
||||
Elastic Stack, animated live attack maps and lots of
|
||||
security tools to further improve the deception
|
||||
experience.
|
||||
keywords:
|
||||
- honeypot
|
||||
- deception
|
||||
- t-pot
|
||||
- telekom security
|
||||
- docker
|
||||
- elk
|
||||
license: GPL-3.0
|
||||
commit: release
|
||||
version: 24.04.1
|
||||
date-released: '2024-12-11'
|
23
SECURITY.md
Normal file
|
@ -0,0 +1,23 @@
|
|||
# Security Policy
|
||||
|
||||
## Supported Versions
|
||||
|
||||
| Version | Supported |
|
||||
| ------- | ------------------ |
|
||||
| 24.04.1 | :white_check_mark: |
|
||||
|
||||
|
||||
## Reporting a Vulnerability
|
||||
|
||||
We prioritize the security of T-Pot highly. Often, vulnerabilities in T-Pot components stem from upstream dependencies, including honeypots, Docker images, tools, or packages. We are committed to working together to resolve any issues effectively.
|
||||
|
||||
Please follow these steps before reporting a potential vulnerability:
|
||||
|
||||
1. Verify that the behavior you've observed isn't already documented as a normal aspect or unrelated issue of T-Pot. For example, Cowrie may initiate outgoing connections, or T-Pot might open all possible TCP ports — a feature enabled by Honeytrap.
|
||||
2. Clearly identify which component is vulnerable (e.g., a specific honeypot, Docker image, tool, package) and isolate the issue.
|
||||
3. Provide a detailed description of the issue, including log and, if available, debug files. Include all steps necessary to reproduce the vulnerability. If you have a proposed solution, hotfix, or patch, please be prepared to submit a pull request (PR).
|
||||
4. Check whether the vulnerability is already known upstream. If there is an existing fix or patch, include that information in your report.
|
||||
|
||||
This approach ensures a thorough and efficient resolution process.
|
||||
|
||||
We aim to respond as quickly as possible. If you believe the issue poses an immediate threat to the entire T-Pot community, you can expedite the process by responsibly alerting our [CERT](https://www.telekom.com/en/corporate-responsibility/data-protection-data-security/security/details/introducing-deutsche-telekom-cert-358316).
|
77
bin/2fa.sh
|
@ -1,77 +0,0 @@
|
|||
#!/bin/bash
|
||||
|
||||
# Make sure script is started as non-root.
|
||||
myWHOAMI=$(whoami)
|
||||
if [ "$myWHOAMI" = "root" ]
|
||||
then
|
||||
echo "Need to run as non-root ..."
|
||||
echo ""
|
||||
exit
|
||||
fi
|
||||
|
||||
# set vars, check deps
|
||||
myPAM_COCKPIT_FILE="/etc/pam.d/cockpit"
|
||||
if ! [ -s "$myPAM_COCKPIT_FILE" ];
|
||||
then
|
||||
echo "### Cockpit PAM module config does not exist. Something went wrong."
|
||||
echo ""
|
||||
exit 1
|
||||
fi
|
||||
myPAM_COCKPIT_GA="
|
||||
|
||||
# google authenticator for two-factor
|
||||
auth required pam_google_authenticator.so
|
||||
"
|
||||
myAUTHENTICATOR=$(which google-authenticator)
|
||||
if [ "$myAUTHENTICATOR" == "" ];
|
||||
then
|
||||
echo "### Could not locate google-authenticator, trying to install (if asked provide root password)."
|
||||
echo ""
|
||||
sudo apt-get update
|
||||
sudo apt-get install -y libpam-google-authenticator
|
||||
exec "$1" "$2"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
|
||||
# write PAM changes
|
||||
function fuWRITE_PAM_CHANGES {
|
||||
myCHECK=$(cat $myPAM_COCKPIT_FILE | grep -c "google")
|
||||
if ! [ "$myCHECK" == "0" ];
|
||||
then
|
||||
echo "### PAM config already enabled. Skipped."
|
||||
echo ""
|
||||
else
|
||||
echo "### Updating PAM config for Cockpit (if asked provide root password)."
|
||||
echo "$myPAM_COCKPIT_GA" | sudo tee -a $myPAM_COCKPIT_FILE
|
||||
sudo systemctl restart cockpit
|
||||
fi
|
||||
}
|
||||
|
||||
# create 2fa
|
||||
function fuGEN_TOKEN {
|
||||
echo "### Now generating token for Google Authenticator."
|
||||
echo ""
|
||||
google-authenticator -t -d -r 3 -R 30 -w 17
|
||||
}
|
||||
|
||||
|
||||
# main
|
||||
echo "### This script will enable Two Factor Authentication for Cockpit."
|
||||
echo ""
|
||||
echo "### Please download one of the many authenticator apps from the appstore of your choice."
|
||||
echo ""
|
||||
while true;
|
||||
do
|
||||
read -p "### Ready to start (y/n)? " myANSWER
|
||||
case $myANSWER in
|
||||
[Yy]* ) echo "### OK. Starting ..."; break;;
|
||||
[Nn]* ) echo "### Exiting."; exit;;
|
||||
esac
|
||||
done
|
||||
|
||||
fuWRITE_PAM_CHANGES
|
||||
fuGEN_TOKEN
|
||||
|
||||
echo "Done. Re-run this script by every user who needs Cockpit access."
|
||||
echo ""
|
|
@ -1,46 +0,0 @@
|
|||
#!/bin/bash
|
||||
# Run as root only.
|
||||
myWHOAMI=$(whoami)
|
||||
if [ "$myWHOAMI" != "root" ]
|
||||
then
|
||||
echo "Need to run as root ..."
|
||||
exit
|
||||
fi
|
||||
|
||||
# Backup all ES relevant folders
|
||||
# Make sure ES is available
|
||||
myES="http://127.0.0.1:64298/"
|
||||
myESSTATUS=$(curl -s -XGET ''$myES'_cluster/health' | jq '.' | grep -c green)
|
||||
if ! [ "$myESSTATUS" = "1" ]
|
||||
then
|
||||
echo "### Elasticsearch is not available, try starting via 'systemctl start tpot'."
|
||||
exit
|
||||
else
|
||||
echo "### Elasticsearch is available, now continuing."
|
||||
echo
|
||||
fi
|
||||
|
||||
# Set vars
|
||||
myCOUNT=1
|
||||
myDATE=$(date +%Y%m%d%H%M)
|
||||
myELKPATH="/data/elk/data"
|
||||
myKIBANAINDEXNAME=$(curl -s -XGET ''$myES'_cat/indices/.kibana' | awk '{ print $4 }')
|
||||
myKIBANAINDEXPATH=$myELKPATH/nodes/0/indices/$myKIBANAINDEXNAME
|
||||
|
||||
# Let's ensure normal operation on exit or if interrupted ...
|
||||
function fuCLEANUP {
|
||||
### Start ELK
|
||||
systemctl start tpot
|
||||
echo "### Now starting T-Pot ..."
|
||||
}
|
||||
trap fuCLEANUP EXIT
|
||||
|
||||
# Stop T-Pot to lift db lock
|
||||
echo "### Now stopping T-Pot"
|
||||
systemctl stop tpot
|
||||
sleep 2
|
||||
|
||||
# Backup DB in 2 flavors
|
||||
echo "### Now backing up Elasticsearch folders ..."
|
||||
tar cvfz "elkall_"$myDATE".tgz" $myELKPATH
|
||||
tar cvfz "elkbase_"$myDATE".tgz" $myKIBANAINDEXPATH
|
54
bin/dps.sh
|
@ -1,54 +0,0 @@
|
|||
#!/bin/bash
|
||||
|
||||
# Run as root only.
|
||||
myWHOAMI=$(whoami)
|
||||
if [ "$myWHOAMI" != "root" ]
|
||||
then
|
||||
echo "Need to run as root ..."
|
||||
exit
|
||||
fi
|
||||
|
||||
# Show current status of T-Pot containers
|
||||
myPARAM="$1"
|
||||
myCONTAINERS="$(cat /opt/tpot/etc/tpot.yml | grep -v '#' | grep container_name | cut -d: -f2 | sort | tr -d " ")"
|
||||
myRED="[1;31m"
|
||||
myGREEN="[1;32m"
|
||||
myBLUE="[1;34m"
|
||||
myWHITE="[0;0m"
|
||||
myMAGENTA="[1;35m"
|
||||
|
||||
function fuGETSTATUS {
|
||||
grc --colour=on docker ps -f status=running -f status=exited --format "table {{.Names}}\t{{.Status}}\t{{.Ports}}" | grep -v "NAME" | sort
|
||||
}
|
||||
|
||||
function fuGETSYS {
|
||||
printf "========| System |========\n"
|
||||
printf "%+10s %-20s\n" "Date: " "$(date)"
|
||||
printf "%+10s %-20s\n" "Uptime: " "$(uptime | cut -b 2-)"
|
||||
echo
|
||||
}
|
||||
|
||||
while true
|
||||
do
|
||||
myDPS=$(fuGETSTATUS)
|
||||
myDPSNAMES=$(echo "$myDPS" | awk '{ print $1 }' | sort)
|
||||
fuGETSYS
|
||||
printf "%-21s %-28s %s\n" "NAME" "STATUS" "PORTS"
|
||||
if [ "$myDPS" != "" ];
|
||||
then
|
||||
echo "$myDPS"
|
||||
fi
|
||||
for i in $myCONTAINERS; do
|
||||
myAVAIL=$(echo "$myDPSNAMES" | grep -o "$i" | uniq | wc -l)
|
||||
if [ "$myAVAIL" = "0" ];
|
||||
then
|
||||
printf "%-28s %-28s\n" "$myRED$i" "DOWN$myWHITE"
|
||||
fi
|
||||
done
|
||||
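  # Repeat with the given refresh interval if a numeric 1-999 second value was passed, otherwise run once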
if [[ $myPARAM =~ ^([1-9]|[1-9][0-9]|[1-9][0-9][0-9])$ ]];
|
||||
then
|
||||
sleep "$myPARAM"
|
||||
else
|
||||
break
|
||||
fi
|
||||
done
|
56
bin/tped.sh
|
@ -1,56 +0,0 @@
|
|||
#!/bin/bash
|
||||
|
||||
# Run as root only.
|
||||
myWHOAMI=$(whoami)
|
||||
if [ "$myWHOAMI" != "root" ]
|
||||
then
|
||||
echo "Need to run as root ..."
|
||||
exit
|
||||
fi
|
||||
|
||||
# set backtitle, get filename
|
||||
myBACKTITLE="T-Pot Edition Selection Tool"
|
||||
myYMLS=$(cd /opt/tpot/etc/compose/ && ls -1 *.yml)
|
||||
myLINK="/opt/tpot/etc/tpot.yml"
|
||||
|
||||
# Let's load docker images in parallel
|
||||
function fuPULLIMAGES {
|
||||
local myTPOTCOMPOSE="/opt/tpot/etc/tpot.yml"
|
||||
for name in $(cat $myTPOTCOMPOSE | grep -v '#' | grep image | cut -d'"' -f2 | uniq)
|
||||
do
|
||||
docker pull $name &
|
||||
done
|
||||
wait
|
||||
echo
|
||||
}
|
||||
|
||||
# setup menu
|
||||
for i in $myYMLS;
|
||||
do
|
||||
myITEMS+="$i $(echo $i | cut -d "." -f1 | tr [:lower:] [:upper:]) "
|
||||
done
|
||||
myEDITION=$(dialog --backtitle "$myBACKTITLE" --menu "Select T-Pot Edition" 12 50 5 $myITEMS 3>&1 1>&2 2>&3 3>&-)
|
||||
if [ "$myEDITION" == "" ];
|
||||
then
|
||||
echo "Have a nice day!"
|
||||
exit
|
||||
fi
|
||||
dialog --backtitle "$myBACKTITLE" --title "[ Activate now? ]" --yesno "\n$myEDITION" 7 50
|
||||
myOK=$?
|
||||
if [ "$myOK" == "0" ];
|
||||
then
|
||||
echo "OK - Activating and downloading latest images."
|
||||
systemctl stop tpot
|
||||
if [ "$(docker ps -aq)" != "" ];
|
||||
then
|
||||
docker stop $(docker ps -aq)
|
||||
docker rm $(docker ps -aq)
|
||||
fi
|
||||
rm -f $myLINK
|
||||
ln -s /opt/tpot/etc/compose/$myEDITION $myLINK
|
||||
fuPULLIMAGES
|
||||
systemctl start tpot
|
||||
echo "Done. Use \"dps.sh\" for monitoring"
|
||||
else
|
||||
echo "Have a nice day!"
|
||||
fi
|
|
@ -1,34 +0,0 @@
|
|||
#!/bin/bash
|
||||
# Let's add the first local IP to /etc/issue and the external IP to the ews.ip file
|
||||
# If the external IP cannot be detected, the internal IP will be inherited.
|
||||
source /etc/environment
|
||||
myLOCALIP=$(hostname -I | awk '{ print $1 }')
|
||||
myEXTIP=$(/opt/tpot/bin/myip.sh)
|
||||
if [ "$myEXTIP" = "" ];
|
||||
then
|
||||
myEXTIP=$myLOCALIP
|
||||
fi
|
||||
mySSHUSER=$(cat /etc/passwd | grep 1000 | cut -d ':' -f1)
|
||||
echo "[H[2J" > /etc/issue
|
||||
toilet -f ivrit -F metal --filter border:metal "T-Pot 20.06" | sed 's/\\/\\\\/g' >> /etc/issue
|
||||
echo >> /etc/issue
|
||||
echo ",---- [ [1;34m\n[0m ] [ [0;34m\d[0m ] [ [1;30m\t[0m ]" >> /etc/issue
|
||||
echo "|" >> /etc/issue
|
||||
echo "| [1;34mIP: $myLOCALIP ($myEXTIP)[0m" >> /etc/issue
|
||||
echo "| [0;34mSSH: ssh -l tsec -p 64295 $myLOCALIP[0m" >> /etc/issue
|
||||
echo "| [1;30mWEB: https://$myLOCALIP:64297[0m" >> /etc/issue
|
||||
echo "| [0;37mADMIN: https://$myLOCALIP:64294[0m" >> /etc/issue
|
||||
echo "|" >> /etc/issue
|
||||
echo "\`----" >> /etc/issue
|
||||
echo >> /etc/issue
|
||||
tee /data/ews/conf/ews.ip << EOF
|
||||
[MAIN]
|
||||
ip = $myEXTIP
|
||||
EOF
|
||||
tee /opt/tpot/etc/compose/elk_environment << EOF
|
||||
MY_EXTIP=$myEXTIP
|
||||
MY_INTIP=$myLOCALIP
|
||||
MY_HOSTNAME=$HOSTNAME
|
||||
EOF
|
||||
chown tpot:tpot /data/ews/conf/ews.ip
|
||||
chmod 770 /data/ews/conf/ews.ip
|
2
cloud/ansible/.gitignore
vendored
|
@ -1,2 +0,0 @@
|
|||
# Ansible
|
||||
*.retry
|
|
@ -1,237 +0,0 @@
|
|||
# T-Pot Ansible
|
||||
|
||||
Here you can find a ready-to-use solution for your automated T-Pot deployment using [Ansible](https://www.ansible.com/).
|
||||
It consists of an Ansible Playbook with multiple roles, which is reusable for all [OpenStack](https://www.openstack.org/) based clouds (e.g. Open Telekom Cloud, Orange Cloud, Telefonica Open Cloud, OVH) out of the box.
|
||||
Apart from that you can easily adapt the deploy role to use other [cloud providers](https://docs.ansible.com/ansible/latest/modules/list_of_cloud_modules.html) (e.g. AWS, Azure, Digital Ocean, Google).
|
||||
|
||||
The Playbook first creates all resources (security group, network, subnet, router), deploys a new server and then installs and configures T-Pot.
|
||||
|
||||
This example showcases the deployment on our own OpenStack based Public Cloud Offering [Open Telekom Cloud](https://open-telekom-cloud.com/en).
|
||||
|
||||
# Table of contents
|
||||
- [Preparation of Ansible Master](#ansible-master)
|
||||
- [Ansible Installation](#ansible)
|
||||
- [Agent Forwarding](#agent-forwarding)
|
||||
- [Preparations in Open Telekom Cloud Console](#preparation)
|
||||
- [Create new project](#project)
|
||||
- [Create API user](#api-user)
|
||||
- [Import Key Pair](#key-pair)
|
||||
- [Clone Git Repository](#clone-git)
|
||||
- [Settings and recommended values](#settings)
|
||||
- [Clouds.yaml](#clouds-yaml)
|
||||
- [Ansible remote user](#remote-user)
|
||||
- [Instance settings](#instance-settings)
|
||||
- [User password](#user-password)
|
||||
- [Configure `tpot.conf.dist`](#tpot-conf)
|
||||
- [Optional: Custom `ews.cfg`](#ews-cfg)
|
||||
- [Optional: Custom HPFEEDS](#hpfeeds)
|
||||
- [Deploying a T-Pot](#deploy)
|
||||
- [Further documentation](#documentation)
|
||||
|
||||
<a name="ansible-master"></a>
|
||||
# Preparation of Ansible Master
|
||||
You can either run the Ansible Playbook locally on your Linux or macOS machine or you can use an ECS (Elastic Cloud Server) on Open Telekom Cloud, which I did.
|
||||
I used Ubuntu 18.04 for my Ansible Master Server, but other OSes are fine too.
|
||||
Ansible works over the SSH Port, so you don't have to add any special rules to your Security Group.
|
||||
|
||||
<a name="ansible"></a>
|
||||
## Ansible Installation
|
||||
Example for Ubuntu 18.04:
|
||||
|
||||
First we update the system:
|
||||
`sudo apt update`
|
||||
`sudo apt dist-upgrade`
|
||||
|
||||
Then we need to add the repository and install Ansible:
|
||||
`sudo apt-add-repository --yes --update ppa:ansible/ansible`
|
||||
`sudo apt install ansible`
|
||||
|
||||
For other OSes and Distros have a look at the official [Ansible Documentation](https://docs.ansible.com/ansible/latest/installation_guide/intro_installation.html).
|
||||
|
||||
<a name="agent-forwarding"></a>
|
||||
## Agent Forwarding
|
||||
If you run the Ansible Playbook remotely on your Ansible Master Server, Agent Forwarding must be enabled in order to let Ansible connect to newly created machines.
|
||||
- On Linux or macOS:
|
||||
- Create or edit `~/.ssh/config`
|
||||
```
|
||||
Host ANSIBLE_MASTER_IP
|
||||
ForwardAgent yes
|
||||
```
|
||||
- On Windows using Putty:
|
||||

|
||||
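Alternatively, when using OpenSSH, agent forwarding can be enabled for a single session with `ssh -A <user>@ANSIBLE_MASTER_IP`.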
|
||||
<a name="preparation"></a>
|
||||
# Preparations in Open Telekom Cloud Console
|
||||
(You can skip this if you have already set up a project and an API account with key pair)
|
||||
(Just make sure you know the naming for everything, as you need to configure the Ansible variables.)
|
||||
|
||||
Before we can start deploying, we have to prepare the Open Telekom Cloud tenant.
|
||||
For that, go to the [Web Console](https://auth.otc.t-systems.com/authui/login) and log in with an admin user.
|
||||
|
||||
<a name="project"></a>
|
||||
## Create new project
|
||||
I strongly advise you to create a separate project for the T-Pots in your tenant.
|
||||
In my case I named it `tpot`.
|
||||
|
||||

|
||||
|
||||
<a name="api-user"></a>
|
||||
## Create API user
|
||||
The next step is to create a new user account, which is restricted to the project.
|
||||
This ensures that the API access is limited to that project.
|
||||
|
||||

|
||||
|
||||
<a name="key-pair"></a>
|
||||
## Import Key Pair
|
||||
:warning: Now log in with the newly created API user account and select your project.
|
||||
|
||||

|
||||
|
||||
Import your SSH public key.
|
||||
|
||||

|
||||
|
||||
|
||||
<a name="clone-git"></a>
|
||||
# Clone Git Repository
|
||||
Clone the `tpotce` repository to your Ansible Master:
|
||||
`git clone https://github.com/dtag-dev-sec/tpotce.git`
|
||||
All Ansible related files are located in the [`cloud/ansible/openstack`](openstack) folder.
|
||||
|
||||
<a name="settings"></a>
|
||||
# Settings and recommended values
|
||||
You can configure all aspects of your Elastic Cloud Server and T-Pot before using the Playbook:
|
||||
|
||||
<a name="clouds-yaml"></a>
|
||||
## Clouds.yaml
|
||||
Located at [`openstack/clouds.yaml`](openstack/clouds.yaml).
|
||||
Enter your Open Telekom Cloud API user credentials here (username, password, project name, user domain name):
|
||||
```
|
||||
clouds:
|
||||
open-telekom-cloud:
|
||||
profile: otc
|
||||
auth:
|
||||
project_name: eu-de_your_project
|
||||
username: your_api_user
|
||||
password: your_password
|
||||
user_domain_name: OTC-EU-DE-000000000010000XXXXX
|
||||
```
|
||||
You can also use other authentication methods, such as sourcing OpenStack OS_* environment variables or providing an inline dictionary.
|
||||
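A sketch of the environment-variable approach (variable names follow the standard OpenStack `OS_*` convention; values are placeholders matching the example above):
```
export OS_AUTH_URL="https://iam.eu-de.otc.t-systems.com/v3"
export OS_PROJECT_NAME="eu-de_your_project"
export OS_USERNAME="your_api_user"
export OS_PASSWORD="your_password"
export OS_USER_DOMAIN_NAME="OTC-EU-DE-000000000010000XXXXX"
```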
For more information, have a look at the [os_server](https://docs.ansible.com/ansible/latest/modules/os_server_module.html) Ansible module documentation.
|
||||
|
||||
<a name="remote-user"></a>
|
||||
## Ansible remote user
|
||||
You may have to adjust the `remote_user` in the Ansible Playbook under [`openstack/deploy_tpot.yaml`](openstack/deploy_tpot.yaml) depending on your Debian base image (e.g. on Open Telekom Cloud the default Debian user is `linux`).
|
||||
|
||||
<a name="instance-settings"></a>
|
||||
## Instance settings
|
||||
Located at [`openstack/roles/deploy/vars/main.yaml`](openstack/roles/deploy/vars/main.yaml).
|
||||
Here you can customize your virtual machine specifications:
|
||||
- Choose an availability zone. For Open Telekom Cloud reference see [here](https://docs.otc.t-systems.com/en-us/endpoint/index.html).
|
||||
- Change the OS image (For T-Pot we need Debian)
|
||||
- (Optional) Change the volume size
|
||||
- Specify your key pair (:warning: Mandatory)
|
||||
- (Optional) Change the instance type (flavor)
|
||||
`s2.medium.8` corresponds to 1 vCPU and 8GB of RAM and is the minimum required flavor.
|
||||
A full list of Open Telekom Cloud flavors can be found [here](https://docs.otc.t-systems.com/en-us/usermanual/ecs/en-us_topic_0177512565.html).
|
||||
|
||||
```
|
||||
availability_zone: eu-de-03
|
||||
image: Standard_Debian_10_latest
|
||||
volume_size: 128
|
||||
key_name: your-KeyPair
|
||||
flavor: s2.medium.8
|
||||
```
|
||||
|
||||
<a name="user-password"></a>
|
||||
## User password
|
||||
Located at [`openstack/roles/install/vars/main.yaml`](openstack/roles/install/vars/main.yaml).
|
||||
Here you can set the password for your Debian user (**you should definitely change that**).
|
||||
```
|
||||
user_password: LiNuXuSeRPaSs#
|
||||
```
|
||||
|
||||
<a name="tpot-conf"></a>
|
||||
## Configure `tpot.conf.dist`
|
||||
The file is located in [`iso/installer/tpot.conf.dist`](/iso/installer/tpot.conf.dist).
|
||||
Here you can choose:
|
||||
- between the various T-Pot editions
|
||||
- a username for the web interface
|
||||
- a password for the web interface (**you should definitely change that**)
|
||||
|
||||
```
|
||||
# tpot configuration file
|
||||
# myCONF_TPOT_FLAVOR=[STANDARD, SENSOR, INDUSTRIAL, COLLECTOR, NEXTGEN]
|
||||
myCONF_TPOT_FLAVOR='STANDARD'
|
||||
myCONF_WEB_USER='webuser'
|
||||
myCONF_WEB_PW='w3b$ecret'
|
||||
```
|
||||
|
||||
<a name="ews-cfg"></a>
|
||||
## Optional: Custom `ews.cfg`
|
||||
Enable this by uncommenting the role in the [deploy_tpot.yaml](openstack/deploy_tpot.yaml) playbook.
|
||||
```
|
||||
# - custom_ews
|
||||
```
|
||||
|
||||
You can use a custom config file for `ewsposter`.
|
||||
e.g. if you have your own credentials for delivering data to our [Sicherheitstacho](https://sicherheitstacho.eu/start/main).
|
||||
You can find the `ews.cfg` template file here: [`openstack/roles/custom_ews/templates/ews.cfg`](openstack/roles/custom_ews/templates/ews.cfg) and adapt it for your needs.
|
||||
|
||||
For setting custom credentials, these settings would be relevant for you (the rest of the file can stay as is):
|
||||
```
|
||||
[MAIN]
|
||||
...
|
||||
contact = your_email_address
|
||||
...
|
||||
|
||||
[EWS]
|
||||
...
|
||||
username = your_username
|
||||
token = your_token
|
||||
...
|
||||
```
|
||||
|
||||
<a name="hpfeeds"></a>
|
||||
## Optional: Custom HPFEEDS
|
||||
Enable this by uncommenting the role in the [deploy_tpot.yaml](openstack/deploy_tpot.yaml) playbook.
|
||||
```
|
||||
# - custom_hpfeeds
|
||||
```
|
||||
|
||||
You can specify custom HPFEEDS in [`openstack/roles/custom_hpfeeds/templates/hpfeeds.cfg`](openstack/roles/custom_hpfeeds/templates/hpfeeds.cfg).
|
||||
That file contains the defaults (turned off) and you can adapt it for your needs, e.g. for SISSDEN:
|
||||
```
|
||||
myENABLE=true
|
||||
myHOST=hpfeeds.sissden.eu
|
||||
myPORT=10000
|
||||
myCHANNEL=t-pot.events
|
||||
myCERT=/opt/ewsposter/sissden.pem
|
||||
myIDENT=your_user
|
||||
mySECRET=your_secret
|
||||
myFORMAT=json
|
||||
```
|
||||
|
||||
<a name="deploy"></a>
|
||||
# Deploying a T-Pot :honey_pot::honeybee:
|
||||
Now, after configuring everything, we can finally start deploying T-Pots!
|
||||
Go to the [`openstack`](openstack) folder and run the Ansible Playbook with:
|
||||
`ansible-playbook deploy_tpot.yaml`
|
||||
(Yes, it is as easy as that :smile:)
|
||||
|
||||
If you are running on a machine which asks for a sudo password, you can use:
|
||||
`ansible-playbook --ask-become-pass deploy_tpot.yaml`
|
||||
|
||||
The Playbook will first install required packages on the Ansible Master and then deploy a new server instance.
|
||||
After that, T-Pot gets installed and configured on the newly created host, optionally custom configs are applied and finally it reboots.
|
||||
|
||||
Once this is done, you can proceed with connecting/logging in to the T-Pot according to the [documentation](https://github.com/dtag-dev-sec/tpotce#ssh-and-web-access).
|
||||
|
||||
<a name="documentation"></a>
|
||||
# Further documentation
|
||||
- [Ansible Documentation](https://docs.ansible.com/ansible/latest/)
|
||||
- [Cloud modules — Ansible Documentation](https://docs.ansible.com/ansible/latest/modules/list_of_cloud_modules.html)
|
||||
- [os_server – Create/Delete Compute Instances from OpenStack — Ansible Documentation](https://docs.ansible.com/ansible/latest/modules/os_server_module.html)
|
||||
- [Open Telekom Cloud Help Center](https://docs.otc.t-systems.com/)
|
||||
- [Open Telekom Cloud API Overview](https://docs.otc.t-systems.com/en-us/api/wp/en-us_topic_0052070394.html)
|
(5 image files removed)
|
@ -1,6 +0,0 @@
|
|||
[defaults]
|
||||
host_key_checking = false
|
||||
|
||||
[ssh_connection]
|
||||
scp_if_ssh = true
|
||||
ssh_args = -o ServerAliveInterval=60
|
|
@ -1,8 +0,0 @@
|
|||
clouds:
|
||||
open-telekom-cloud:
|
||||
profile: otc
|
||||
auth:
|
||||
project_name: eu-de_your_project
|
||||
username: your_api_user
|
||||
password: your_password
|
||||
user_domain_name: OTC-EU-DE-000000000010000XXXXX
|
|
@ -1,21 +0,0 @@
|
|||
- name: Check host prerequisites
|
||||
hosts: localhost
|
||||
become: yes
|
||||
roles:
|
||||
- check
|
||||
|
||||
- name: Deploy instance
|
||||
hosts: localhost
|
||||
roles:
|
||||
- deploy
|
||||
|
||||
- name: Install T-Pot on new instance
|
||||
hosts: TPOT
|
||||
remote_user: linux
|
||||
become: yes
|
||||
gather_facts: no
|
||||
roles:
|
||||
- install
|
||||
# - custom_ews
|
||||
# - custom_hpfeeds
|
||||
- reboot
|
|
@ -1,17 +0,0 @@
|
|||
- name: Install dependencies
|
||||
package:
|
||||
name:
|
||||
- pwgen
|
||||
- python-setuptools
|
||||
- python-pip
|
||||
state: present
|
||||
|
||||
- name: Install openstacksdk
|
||||
pip:
|
||||
name: openstacksdk
|
||||
|
||||
- name: Check if agent forwarding is enabled
|
||||
fail:
|
||||
msg: Please enable agent forwarding to allow Ansible to connect to the remote host!
|
||||
ignore_errors: yes
|
||||
when: lookup('env','SSH_AUTH_SOCK') == ""
|
|
@ -1,13 +0,0 @@
|
|||
- name: Copy ews configuration file
|
||||
template:
|
||||
src: ews.cfg
|
||||
dest: /data/ews/conf
|
||||
owner: root
|
||||
group: root
|
||||
mode: 0644
|
||||
|
||||
- name: Patching tpot.yml with custom ews configuration file
|
||||
lineinfile:
|
||||
path: /opt/tpot/etc/tpot.yml
|
||||
insertafter: "/opt/ewsposter/ews.ip"
|
||||
line: " - /data/ews/conf/ews.cfg:/opt/ewsposter/ews.cfg"
|
|
@ -1,137 +0,0 @@
|
|||
[MAIN]
|
||||
homedir = /opt/ewsposter/
|
||||
spooldir = /opt/ewsposter/spool/
|
||||
logdir = /opt/ewsposter/log/
|
||||
del_malware_after_send = false
|
||||
send_malware = true
|
||||
sendlimit = 500
|
||||
contact = your_email_address
|
||||
proxy =
|
||||
ip =
|
||||
|
||||
[EWS]
|
||||
ews = true
|
||||
username = your_username
|
||||
token = your_token
|
||||
rhost_first = https://community.sicherheitstacho.eu/ews-0.1/alert/postSimpleMessage
|
||||
rhost_second = https://community.sicherheitstacho.eu/ews-0.1/alert/postSimpleMessage
|
||||
ignorecert = false
|
||||
|
||||
[HPFEED]
|
||||
hpfeed = %(EWS_HPFEEDS_ENABLE)s
|
||||
host = %(EWS_HPFEEDS_HOST)s
|
||||
port = %(EWS_HPFEEDS_PORT)s
|
||||
channels = %(EWS_HPFEEDS_CHANNELS)s
|
||||
ident = %(EWS_HPFEEDS_IDENT)s
|
||||
secret= %(EWS_HPFEEDS_SECRET)s
|
||||
# path/to/certificate for tls broker - or "false" for non-tls broker
|
||||
tlscert = %(EWS_HPFEEDS_TLSCERT)s
|
||||
# hpfeeds submission format: "ews" (xml) or "json"
|
||||
hpfformat = %(EWS_HPFEEDS_FORMAT)s
|
||||
|
||||
[EWSJSON]
|
||||
json = false
|
||||
jsondir = /data/ews/json/
|
||||
|
||||
[GLASTOPFV3]
|
||||
glastopfv3 = true
|
||||
nodeid = glastopfv3-{{ ansible_hostname }}
|
||||
sqlitedb = /data/glastopf/db/glastopf.db
|
||||
malwaredir = /data/glastopf/data/files/
|
||||
|
||||
[GLASTOPFV2]
|
||||
glastopfv2 = false
|
||||
nodeid =
|
||||
mysqlhost =
|
||||
mysqldb =
|
||||
mysqluser =
|
||||
mysqlpw =
|
||||
malwaredir =
|
||||
|
||||
[KIPPO]
|
||||
kippo = false
|
||||
nodeid =
|
||||
mysqlhost =
|
||||
mysqldb =
|
||||
mysqluser =
|
||||
mysqlpw =
|
||||
malwaredir =
|
||||
|
||||
[COWRIE]
|
||||
cowrie = true
|
||||
nodeid = cowrie-{{ ansible_hostname }}
|
||||
logfile = /data/cowrie/log/cowrie.json
|
||||
|
||||
[DIONAEA]
|
||||
dionaea = true
|
||||
nodeid = dionaea-{{ ansible_hostname }}
|
||||
malwaredir = /data/dionaea/binaries/
|
||||
sqlitedb = /data/dionaea/log/dionaea.sqlite
|
||||
|
||||
[HONEYTRAP]
|
||||
honeytrap = true
|
||||
nodeid = honeytrap-{{ ansible_hostname }}
|
||||
newversion = true
|
||||
payloaddir = /data/honeytrap/attacks/
|
||||
attackerfile = /data/honeytrap/log/attacker.log
|
||||
|
||||
[RDPDETECT]
|
||||
rdpdetect = false
|
||||
nodeid =
|
||||
iptableslog =
|
||||
targetip =
|
||||
|
||||
[EMOBILITY]
|
||||
eMobility = false
|
||||
nodeid = emobility-{{ ansible_hostname }}
|
||||
logfile = /data/emobility/log/centralsystemEWS.log
|
||||
|
||||
[CONPOT]
|
||||
conpot = true
|
||||
nodeid = conpot-{{ ansible_hostname }}
|
||||
logfile = /data/conpot/log/conpot*.json
|
||||
|
||||
[ELASTICPOT]
|
||||
elasticpot = true
|
||||
nodeid = elasticpot-{{ ansible_hostname }}
|
||||
logfile = /data/elasticpot/log/elasticpot.log
|
||||
|
||||
[SURICATA]
|
||||
suricata = true
|
||||
nodeid = suricata-{{ ansible_hostname }}
|
||||
logfile = /data/suricata/log/eve.json
|
||||
|
||||
[MAILONEY]
|
||||
mailoney = true
|
||||
nodeid = mailoney-{{ ansible_hostname }}
|
||||
logfile = /data/mailoney/log/commands.log
|
||||
|
||||
[RDPY]
|
||||
rdpy = true
|
||||
nodeid = rdpy-{{ ansible_hostname }}
|
||||
logfile = /data/rdpy/log/rdpy.log
|
||||
|
||||
[VNCLOWPOT]
|
||||
vnclowpot = true
|
||||
nodeid = vnclowpot-{{ ansible_hostname }}
|
||||
logfile = /data/vnclowpot/log/vnclowpot.log
|
||||
|
||||
[HERALDING]
|
||||
heralding = true
|
||||
nodeid = heralding-{{ ansible_hostname }}
|
||||
logfile = /data/heralding/log/auth.csv
|
||||
|
||||
[CISCOASA]
|
||||
ciscoasa = true
|
||||
nodeid = ciscoasa-{{ ansible_hostname }}
|
||||
logfile = /data/ciscoasa/log/ciscoasa.log
|
||||
|
||||
[TANNER]
|
||||
tanner = true
|
||||
nodeid = tanner-{{ ansible_hostname }}
|
||||
logfile = /data/tanner/log/tanner_report.json
|
||||
|
||||
[GLUTTON]
|
||||
glutton = true
|
||||
nodeid = glutton-{{ ansible_hostname }}
|
||||
logfile = /data/glutton/log/glutton.log
|
|
@ -1,8 +0,0 @@
|
|||
myENABLE=false
|
||||
myHOST=host
|
||||
myPORT=port
|
||||
myCHANNEL=channels
|
||||
myCERT=false
|
||||
myIDENT=user
|
||||
mySECRET=secret
|
||||
myFORMAT=json
|
|
@ -1,12 +0,0 @@
|
|||
- name: Copy hpfeeds configuration file
|
||||
copy:
|
||||
src: hpfeeds.cfg
|
||||
dest: /data/ews/conf
|
||||
owner: tpot
|
||||
group: tpot
|
||||
mode: 0770
|
||||
register: config
|
||||
|
||||
- name: Applying hpfeeds settings
|
||||
command: /opt/tpot/bin/hpfeeds_optin.sh --conf=/data/ews/conf/hpfeeds.cfg
|
||||
when: config.changed == true
|
|
@ -1,58 +0,0 @@
|
|||
- name: Create T-Pot name
|
||||
shell: echo t-pot-ansible-$(pwgen -ns 6 -1)
|
||||
register: tpot_name
|
||||
|
||||
- name: Create security group
|
||||
os_security_group:
|
||||
cloud: open-telekom-cloud
|
||||
name: sg-tpot-any
|
||||
description: tpot any-any
|
||||
|
||||
- name: Add rules to security group
|
||||
os_security_group_rule:
|
||||
cloud: open-telekom-cloud
|
||||
security_group: sg-tpot-any
|
||||
remote_ip_prefix: 0.0.0.0/0
|
||||
|
||||
- name: Create network
|
||||
os_network:
|
||||
cloud: open-telekom-cloud
|
||||
name: network-tpot
|
||||
|
||||
- name: Create subnet
|
||||
os_subnet:
|
||||
cloud: open-telekom-cloud
|
||||
network_name: network-tpot
|
||||
name: subnet-tpot
|
||||
cidr: 192.168.0.0/24
|
||||
dns_nameservers:
|
||||
- 1.1.1.1
|
||||
- 8.8.8.8
|
||||
|
||||
- name: Create router
|
||||
os_router:
|
||||
cloud: open-telekom-cloud
|
||||
name: router-tpot
|
||||
interfaces:
|
||||
- subnet-tpot
|
||||
|
||||
- name: Launch an instance
|
||||
os_server:
|
||||
cloud: open-telekom-cloud
|
||||
name: "{{ tpot_name.stdout }}"
|
||||
availability_zone: "{{ availability_zone }}"
|
||||
image: "{{ image }}"
|
||||
boot_from_volume: yes
|
||||
volume_size: "{{ volume_size }}"
|
||||
key_name: "{{ key_name }}"
|
||||
timeout: 200
|
||||
flavor: "{{ flavor }}"
|
||||
security_groups: sg-tpot-any
|
||||
network: network-tpot
|
||||
register: tpot
|
||||
|
||||
- name: Add instance to inventory
|
||||
add_host:
|
||||
hostname: "{{ tpot_name.stdout }}"
|
||||
ansible_host: "{{ tpot.server.public_v4 }}"
|
||||
groups: TPOT
|
|
@ -1,5 +0,0 @@
|
|||
availability_zone: eu-de-03
|
||||
image: Standard_Debian_10_latest
|
||||
volume_size: 128
|
||||
key_name: your-KeyPair
|
||||
flavor: s2.medium.8
|
|
@ -1,48 +0,0 @@
|
|||
- name: Waiting for SSH connection
|
||||
wait_for_connection:
|
||||
|
||||
- name: Gathering facts
|
||||
setup:
|
||||
|
||||
- name: Cloning T-Pot install directory
|
||||
git:
|
||||
repo: "https://github.com/dtag-dev-sec/tpotce.git"
|
||||
dest: /root/tpot
|
||||
|
||||
- name: Prepare to set user password
|
||||
set_fact:
|
||||
user_name: "{{ ansible_user }}"
|
||||
user_salt: "s0mew1ck3dTpoT"
|
||||
no_log: true
|
||||
|
||||
- name: Changing password for user {{ user_name }}
|
||||
user:
|
||||
name: "{{ ansible_user }}"
|
||||
password: "{{ user_password | password_hash('sha512', user_salt) }}"
|
||||
state: present
|
||||
shell: /bin/bash
|
||||
|
||||
- name: Copy T-Pot configuration file
|
||||
template:
|
||||
src: ../../../../../../iso/installer/tpot.conf.dist
|
||||
dest: /root/tpot.conf
|
||||
owner: root
|
||||
group: root
|
||||
mode: 0644
|
||||
|
||||
- name: Install T-Pot on instance - be patient, this might take 15 to 30 minutes depending on the connection speed.
|
||||
command: /root/tpot/iso/installer/install.sh --type=auto --conf=/root/tpot.conf
|
||||
|
||||
- name: Delete T-Pot configuration file
|
||||
file:
|
||||
path: /root/tpot.conf
|
||||
state: absent
|
||||
|
||||
- name: Change unattended-upgrades to take default action
|
||||
blockinfile:
|
||||
dest: /etc/apt/apt.conf.d/50unattended-upgrades
|
||||
block: |
|
||||
Dpkg::Options {
|
||||
"--force-confdef";
|
||||
"--force-confold";
|
||||
}
|
|
@ -1 +0,0 @@
|
|||
user_password: LiNuXuSeRPaSs#
|
|
@ -1,16 +0,0 @@
|
|||
- name: Finally rebooting T-Pot
|
||||
command: shutdown -r now
|
||||
async: 1
|
||||
poll: 0
|
||||
|
||||
- name: Next login options
|
||||
debug:
|
||||
msg:
|
||||
- "***** SSH Access:"
|
||||
- "***** ssh {{ ansible_user }}@{{ ansible_host }} -p 64295"
|
||||
- ""
|
||||
- "***** Web UI:"
|
||||
- "***** https://{{ ansible_host }}:64297"
|
||||
- ""
|
||||
- "***** Admin UI:"
|
||||
- "***** https://{{ ansible_host }}:64294"
|
2
cloud/terraform/.gitignore
vendored
|
@ -1,2 +0,0 @@
|
|||
**/.terraform
|
||||
**/terraform.*
|
|
@ -1,127 +0,0 @@
|
|||
# T-Pot Terraform
|
||||
This [Terraform](https://www.terraform.io/) configuration can be used to launch a virtual machine, bootstrap any dependencies and install T-Pot in a single step.
|
||||
Configuration for Amazon Web Services (AWS) and Open Telekom Cloud (OTC) is currently included.
|
||||
This can easily be extended to support other [Terraform providers](https://www.terraform.io/docs/providers/index.html).
|
||||
|
||||
[Cloud-init](https://cloudinit.readthedocs.io/en/latest/) is used to bootstrap the instance and install T-Pot on startup.
|
||||
|
||||
# Table of Contents
|
||||
- [What gets created](#what-created)
|
||||
- [Amazon Web Services (AWS)](#what-created-aws)
|
||||
- [Open Telekom Cloud (OTC)](#what-created-otc)
|
||||
- [Pre-Requisites](#pre)
|
||||
- [Amazon Web Services (AWS)](#pre-aws)
|
||||
- [Open Telekom Cloud (OTC)](#pre-otc)
|
||||
- [Terraform Variables](#variables)
|
||||
- [Common configuration items](#variables-common)
|
||||
- [Amazon Web Services (AWS)](#variables-aws)
|
||||
- [Open Telekom Cloud (OTC)](#variables-otc)
|
||||
- [Initialising](#initialising)
|
||||
- [Applying the Configuration](#applying)
|
||||
- [Connecting to the Instance](#connecting)
|
||||
|
||||
<a name="what-created"></a>
|
||||
## What gets created
|
||||
|
||||
<a name="what-created-aws"></a>
|
||||
### Amazon Web Services (AWS)
|
||||
* EC2 instance:
|
||||
* t3.large (2 vCPUs, 8 GB RAM)
|
||||
* 128 GB disk
|
||||
* Debian 10
|
||||
* Public IP
|
||||
* Security Group:
|
||||
* TCP/UDP ports <= 64000 open to the Internet
|
||||
* TCP ports 64294, 64295 and 64297 open to a chosen administrative IP
|
||||
|
||||
<a name="what-created-otc"></a>
|
||||
### Open Telekom Cloud (OTC)
|
||||
* ECS instance:
|
||||
* s2.medium.8 (1 vCPU, 8 GB RAM)
|
||||
* 128 GB disk
|
||||
* Debian 10
|
||||
* Public EIP
|
||||
* Security Group
|
||||
* Network, Subnet, Router (= Virtual Private Cloud [VPC])
|
||||
|
||||
<a name="pre"></a>
|
||||
## Pre-Requisites
|
||||
* [Terraform](https://www.terraform.io/) 0.12
|
||||
|
||||
<a name="pre-aws"></a>
|
||||
### Amazon Web Services (AWS)
|
||||
* AWS Account
|
||||
* Existing VPC: VPC ID needs to be specified in `aws/variables.tf`
|
||||
* Existing subnet: Subnet ID needs to be specified in `aws/variables.tf`
|
||||
* Existing SSH key pair: Key name needs to be specified in `aws/variables.tf`
|
||||
* AWS Authentication credentials should be [set using environment variables](https://www.terraform.io/docs/providers/aws/index.html#environment-variables)
|
||||
|
||||
<a name="pre-otc"></a>
|
||||
### Open Telekom Cloud (OTC)
|
||||
* OTC Account
|
||||
* Existing SSH key pair: Key name needs to be specified in `otc/variables.tf`
|
||||
* OTC Authentication credentials (Username, Password, Project Name, User Domain Name) can be set in the `otc/clouds.yaml` file
|
||||
|
||||
<a name="variables"></a>
|
||||
## Terraform Variables
|
||||
|
||||
<a name="variables-common"></a>
|
||||
### Common configuration items
|
||||
These variables exist in `aws/variables.tf` and `otc/variables.tf` respectively.
|
||||
Settings for cloud-init:
|
||||
* `timezone` - Set the Server's timezone
|
||||
* `linux_password` - Set a password for the Linux Operating System user (which is also used on the Admin UI)
|
||||
|
||||
Settings for T-Pot:
|
||||
* `tpot_flavor` - Set the flavor of the T-Pot (Available flavors are listed in the variable's description)
|
||||
* `web_user` - Set a username for the T-Pot Kibana Dashboard
|
||||
* `web_password` - Set a password for the T-Pot Kibana Dashboard
|
||||
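Variables without defaults can also be supplied on the command line instead of editing the `variables.tf` files — e.g. (placeholder values, mirroring the commented defaults):
```
$ terraform apply \
    -var 'linux_password=LiNuXuSeRPaSs#' \
    -var 'web_password=w3b$ecret'
```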
|
||||
<a name="variables-aws"></a>
|
||||
### Amazon Web Services (AWS)
|
||||
In `aws/variables.tf`, you can change the additional variables:
|
||||
* `admin_ip` - source IP address(es) that you will use to administer the system. Connections to TCP ports 64294, 64295 and 64297 will be allowed from this IP only. Multiple IPs or CIDR blocks can be specified in the format: `["127.0.0.1/32", "192.168.0.0/24"]`
|
||||
* `ec2_vpc_id` - Specify an existing VPC ID
|
||||
* `ec2_subnet_id` - Specify an existing Subnet ID
|
||||
* `ec2_region`
|
||||
* `ec2_ssh_key_name` - Specify an existing SSH key pair
|
||||
* `ec2_instance_type`
|
||||
|
||||
<a name="variables-otc"></a>
|
||||
### Open Telekom Cloud (OTC)
|
||||
In `otc/variables.tf`, you can change the additional variables:
|
||||
* `availabiliy_zone`
|
||||
* `flavor`
|
||||
* `key_pair` - Specify an existing SSH key pair
|
||||
* `image_id`
|
||||
* `volume_size`
|
||||
Furthermore, you can configure the naming of the created infrastructure (by default everything gets prefixed with "tpot-", e.g. "tpot-router").
|
||||
|
||||
<a name="initialising"></a>
|
||||
## Initialising
|
||||
The [`terraform init`](https://www.terraform.io/docs/commands/init.html) command is used to initialize a working directory containing Terraform configuration files.
|
||||
|
||||
```
|
||||
$ cd aws
|
||||
$ terraform init
|
||||
```
|
||||
OR
|
||||
```
|
||||
$ cd otc
|
||||
$ terraform init
|
||||
```
|
||||
|
||||
<a name="applying"></a>
|
||||
## Applying the Configuration
|
||||
The [`terraform apply`](https://www.terraform.io/docs/commands/apply.html) command is used to apply the changes required to reach the desired state of the configuration, or the pre-determined set of actions generated by a [`terraform plan`](https://www.terraform.io/docs/commands/plan.html) execution plan.
|
||||
|
||||
```
|
||||
$ terraform apply
|
||||
```
|
||||
This will create your infrastructure and start a Cloud Server. On startup, the Server gets bootstrapped with cloud-init and will install T-Pot. Once this is done, the server will reboot.
|
||||
|
||||
If you want to remove the built infrastructure, you can run [`terraform destroy`](https://www.terraform.io/docs/commands/destroy.html) to delete it.
|
||||
|
||||
<a name="connecting"></a>
|
||||
## Connecting to the Instance
|
||||
When the installation is completed, you can proceed with connecting/logging in to the T-Pot according to the [documentation](https://github.com/dtag-dev-sec/tpotce#ssh-and-web-access).
|
|
@ -1,66 +0,0 @@
|
|||
provider "aws" {
|
||||
region = var.ec2_region
|
||||
}
|
||||
|
||||
resource "aws_security_group" "tpot" {
|
||||
name = "T-Pot"
|
||||
description = "T-Pot Honeypot"
|
||||
vpc_id = var.ec2_vpc_id
|
||||
ingress {
|
||||
from_port = 0
|
||||
to_port = 64000
|
||||
protocol = "tcp"
|
||||
cidr_blocks = ["0.0.0.0/0"]
|
||||
}
|
||||
ingress {
|
||||
from_port = 0
|
||||
to_port = 64000
|
||||
protocol = "udp"
|
||||
cidr_blocks = ["0.0.0.0/0"]
|
||||
}
|
||||
ingress {
|
||||
from_port = 64294
|
||||
to_port = 64294
|
||||
protocol = "tcp"
|
||||
cidr_blocks = var.admin_ip
|
||||
}
|
||||
ingress {
|
||||
from_port = 64295
|
||||
to_port = 64295
|
||||
protocol = "tcp"
|
||||
cidr_blocks = var.admin_ip
|
||||
}
|
||||
ingress {
|
||||
from_port = 64297
|
||||
to_port = 64297
|
||||
protocol = "tcp"
|
||||
cidr_blocks = var.admin_ip
|
||||
}
|
||||
egress {
|
||||
from_port = 0
|
||||
to_port = 0
|
||||
protocol = "-1"
|
||||
cidr_blocks = ["0.0.0.0/0"]
|
||||
}
|
||||
tags = {
|
||||
Name = "T-Pot"
|
||||
}
|
||||
}
|
||||
|
||||
resource "aws_instance" "tpot" {
|
||||
ami = var.ec2_ami[var.ec2_region]
|
||||
instance_type = var.ec2_instance_type
|
||||
key_name = var.ec2_ssh_key_name
|
||||
subnet_id = var.ec2_subnet_id
|
||||
tags = {
|
||||
Name = "T-Pot Honeypot"
|
||||
}
|
||||
root_block_device {
|
||||
volume_type = "gp2"
|
||||
volume_size = 128
|
||||
delete_on_termination = true
|
||||
}
|
||||
user_data = templatefile("../cloud-init.yaml", {timezone = var.timezone, password = var.linux_password, tpot_flavor = var.tpot_flavor, web_user = var.web_user, web_password = var.web_password})
|
||||
vpc_security_group_ids = [aws_security_group.tpot.id]
|
||||
associate_public_ip_address = true
|
||||
}
|
|
@ -1,12 +0,0 @@
|
|||
output "Admin_UI" {
|
||||
value = "https://${aws_instance.tpot.public_dns}:64294/"
|
||||
}
|
||||
|
||||
output "SSH_Access" {
|
||||
value = "ssh -i {private_key_file} -p 64295 admin@${aws_instance.tpot.public_dns}"
|
||||
}
|
||||
|
||||
output "Web_UI" {
|
||||
value = "https://${aws_instance.tpot.public_dns}:64297/"
|
||||
}
|
||||
|
|
@ -1,80 +0,0 @@
|
|||
variable "admin_ip" {
|
||||
default = ["127.0.0.1/32"]
|
||||
description = "admin IP addresses in CIDR format"
|
||||
}
|
||||
|
||||
variable "ec2_vpc_id" {
|
||||
description = "ID of AWS VPC"
|
||||
default = "vpc-XXX"
|
||||
}
|
||||
|
||||
variable "ec2_subnet_id" {
|
||||
description = "ID of AWS VPC subnet"
|
||||
default = "subnet-YYY"
|
||||
}
|
||||
|
||||
variable "ec2_region" {
|
||||
description = "AWS region to launch servers"
|
||||
default = "eu-west-1"
|
||||
}
|
||||
|
||||
variable "ec2_ssh_key_name" {
|
||||
default = "default"
|
||||
}
|
||||
|
||||
# https://aws.amazon.com/ec2/instance-types/
|
||||
# t3.large = 2 vCPU, 8 GiB RAM
|
||||
variable "ec2_instance_type" {
|
||||
default = "t3.large"
|
||||
}
|
||||
|
||||
# Refer to https://wiki.debian.org/Cloud/AmazonEC2Image/Buster
|
||||
variable "ec2_ami" {
|
||||
type = map(string)
|
||||
default = {
|
||||
"ap-east-1" = "ami-f9c58188"
|
||||
"ap-northeast-1" = "ami-0fae5501ae428f9d7"
|
||||
"ap-northeast-2" = "ami-0522874b039290246"
|
||||
"ap-south-1" = "ami-03b4e18f70aca8973"
|
||||
"ap-southeast-1" = "ami-0852293c17f5240b3"
|
||||
"ap-southeast-2" = "ami-03ea2db714f1f6acf"
|
||||
"ca-central-1" = "ami-094511e5020cdea18"
|
||||
"eu-central-1" = "ami-0394acab8c5063f6f"
|
||||
"eu-north-1" = "ami-0c82d9a7f5674320a"
|
||||
"eu-west-1" = "ami-006d280940ad4a96c"
|
||||
"eu-west-2" = "ami-08fe9ea08db6f1258"
|
||||
"eu-west-3" = "ami-04563f5eab11f2b87"
|
||||
"me-south-1" = "ami-0492a01b319d1f052"
|
||||
"sa-east-1" = "ami-05e16feea94258a69"
|
||||
"us-east-1" = "ami-04d70e069399af2e9"
|
||||
"us-east-2" = "ami-04100f1cdba76b497"
|
||||
"us-west-1" = "ami-014c78f266c5b7163"
|
||||
"us-west-2" = "ami-023b7a69b9328e1f9"
|
||||
}
|
||||
}
|
||||
|
||||
# cloud-init configuration
|
||||
variable "timezone" {
|
||||
default = "UTC"
|
||||
}
|
||||
|
||||
variable "linux_password" {
|
||||
#default = "LiNuXuSeRPaSs#"
|
||||
description = "Set a password for the default user"
|
||||
}
|
||||
|
||||
# These will go in the generated tpot.conf file
|
||||
variable "tpot_flavor" {
|
||||
default = "STANDARD"
|
||||
description = "Specify your tpot flavor [STANDARD, SENSOR, INDUSTRIAL, COLLECTOR, NEXTGEN]"
|
||||
}
|
||||
|
||||
variable "web_user" {
|
||||
default = "webuser"
|
||||
description = "Set a username for the web user"
|
||||
}
|
||||
|
||||
variable "web_password" {
|
||||
#default = "w3b$ecret"
|
||||
description = "Set a password for the web user"
|
||||
}
|
|
@ -1,3 +0,0 @@
|
|||
terraform {
|
||||
required_version = ">= 0.12"
|
||||
}
|
|
@ -1,25 +0,0 @@
|
|||
#cloud-config
|
||||
timezone: ${timezone}
|
||||
|
||||
packages:
|
||||
- git
|
||||
|
||||
runcmd:
|
||||
- git clone https://github.com/dtag-dev-sec/tpotce /root/tpot
|
||||
- /root/tpot/iso/installer/install.sh --type=auto --conf=/root/tpot.conf
|
||||
- rm /root/tpot.conf
|
||||
- /sbin/shutdown -r now
|
||||
|
||||
password: ${password}
|
||||
chpasswd:
|
||||
expire: false
|
||||
|
||||
write_files:
|
||||
- content: |
|
||||
# tpot configuration file
|
||||
myCONF_TPOT_FLAVOR='${tpot_flavor}'
|
||||
myCONF_WEB_USER='${web_user}'
|
||||
myCONF_WEB_PW='${web_password}'
|
||||
owner: root:root
|
||||
path: /root/tpot.conf
|
||||
permissions: '0600'
|
|
@ -1,8 +0,0 @@
|
|||
clouds:
|
||||
open-telekom-cloud:
|
||||
auth:
|
||||
project_name: eu-de_your_project
|
||||
username: your_api_user
|
||||
password: your_password
|
||||
user_domain_name: OTC-EU-DE-000000000010000XXXXX
|
||||
auth_url: https://iam.eu-de.otc.t-systems.com/v3
|
|
@ -1,67 +0,0 @@
|
|||
resource "opentelekomcloud_networking_secgroup_v2" "secgroup_1" {
|
||||
name = var.secgroup_name
|
||||
description = var.secgroup_desc
|
||||
}
|
||||
|
||||
resource "opentelekomcloud_networking_secgroup_rule_v2" "secgroup_rule_1" {
|
||||
direction = "ingress"
|
||||
ethertype = "IPv4"
|
||||
remote_ip_prefix = "0.0.0.0/0"
|
||||
security_group_id = opentelekomcloud_networking_secgroup_v2.secgroup_1.id
|
||||
}
|
||||
|
||||
resource "opentelekomcloud_networking_network_v2" "network_1" {
|
||||
name = var.network_name
|
||||
}
|
||||
|
||||
resource "opentelekomcloud_networking_subnet_v2" "subnet_1" {
|
||||
name = var.subnet_name
|
||||
network_id = opentelekomcloud_networking_network_v2.network_1.id
|
||||
cidr = "192.168.0.0/24"
|
||||
dns_nameservers = ["1.1.1.1", "8.8.8.8"]
|
||||
}
|
||||
|
||||
resource "opentelekomcloud_networking_router_v2" "router_1" {
|
||||
name = var.router_name
|
||||
}
|
||||
|
||||
resource "opentelekomcloud_networking_router_interface_v2" "router_interface_1" {
|
||||
router_id = opentelekomcloud_networking_router_v2.router_1.id
|
||||
subnet_id = opentelekomcloud_networking_subnet_v2.subnet_1.id
|
||||
}
|
||||
|
||||
resource "random_id" "tpot" {
|
||||
byte_length = 6
|
||||
prefix = var.ecs_prefix
|
||||
}
|
||||
|
||||
resource "opentelekomcloud_compute_instance_v2" "ecs_1" {
|
||||
availability_zone = var.availabiliy_zone
|
||||
name = random_id.tpot.b64
|
||||
flavor_name = var.flavor
|
||||
key_pair = var.key_pair
|
||||
security_groups = [opentelekomcloud_networking_secgroup_v2.secgroup_1.name]
|
||||
user_data = templatefile("../cloud-init.yaml", {timezone = var.timezone, password = var.linux_password, tpot_flavor = var.tpot_flavor, web_user = var.web_user, web_password = var.web_password})
|
||||
|
||||
network {
|
||||
name = opentelekomcloud_networking_network_v2.network_1.name
|
||||
}
|
||||
|
||||
block_device {
|
||||
uuid = var.image_id
|
||||
source_type = "image"
|
||||
volume_size = var.volume_size
|
||||
destination_type = "volume"
|
||||
delete_on_termination = "true"
|
||||
}
|
||||
|
||||
depends_on = [opentelekomcloud_networking_router_interface_v2.router_interface_1]
|
||||
}
|
||||
|
||||
resource "opentelekomcloud_networking_floatingip_v2" "floatip_1" {
|
||||
}
|
||||
|
||||
resource "opentelekomcloud_compute_floatingip_associate_v2" "fip_2" {
|
||||
floating_ip = opentelekomcloud_networking_floatingip_v2.floatip_1.address
|
||||
instance_id = opentelekomcloud_compute_instance_v2.ecs_1.id
|
||||
}
|
|
@ -1,11 +0,0 @@
|
|||
output "Admin_UI" {
|
||||
value = "https://${opentelekomcloud_networking_floatingip_v2.floatip_1.address}:64294"
|
||||
}
|
||||
|
||||
output "SSH_Access" {
|
||||
value = "ssh -p 64295 linux@${opentelekomcloud_networking_floatingip_v2.floatip_1.address}"
|
||||
}
|
||||
|
||||
output "Web_UI" {
|
||||
value = "https://${opentelekomcloud_networking_floatingip_v2.floatip_1.address}:64297"
|
||||
}
|
|
@ -1,3 +0,0 @@
|
|||
provider "opentelekomcloud" {
|
||||
cloud = "open-telekom-cloud"
|
||||
}
|
|
@ -1,76 +0,0 @@
|
|||
# cloud-init configuration
|
||||
variable "timezone" {
|
||||
default = "UTC"
|
||||
}
|
||||
|
||||
variable "linux_password" {
|
||||
#default = "LiNuXuSeRPaSs#"
|
||||
description = "Set a password for the default user"
|
||||
}
|
||||
|
||||
# Cloud resources name configuration
|
||||
variable "secgroup_name" {
|
||||
default = "tpot-secgroup"
|
||||
}
|
||||
|
||||
variable "secgroup_desc" {
|
||||
default = "T-Pot Security Group"
|
||||
}
|
||||
|
||||
variable "network_name" {
|
||||
default = "tpot-network"
|
||||
}
|
||||
|
||||
variable "subnet_name" {
|
||||
default = "tpot-subnet"
|
||||
}
|
||||
|
||||
variable "router_name" {
|
||||
default = "tpot-router"
|
||||
}
|
||||
|
||||
variable "ecs_prefix" {
|
||||
default = "tpot-"
|
||||
}
|
||||
|
||||
# ECS configuration
|
||||
variable "availabiliy_zone" {
|
||||
default = "eu-de-03"
|
||||
description = "Select an availability zone"
|
||||
}
|
||||
|
||||
variable "flavor" {
|
||||
default = "s2.medium.8"
|
||||
description = "Select a compute flavor"
|
||||
}
|
||||
|
||||
variable "key_pair" {
|
||||
#default = ""
|
||||
description = "Specify your SSH key pair"
|
||||
}
|
||||
|
||||
variable "image_id" {
|
||||
default = "d97dd29c-9318-4e4c-8d3a-7307d1513b77"
|
||||
description = "Select a Debian 10 base image id"
|
||||
}
|
||||
|
||||
variable "volume_size" {
|
||||
default = "128"
|
||||
description = "Set the volume size"
|
||||
}
|
||||
|
||||
# These will go in the generated tpot.conf file
|
||||
variable "tpot_flavor" {
|
||||
default = "STANDARD"
|
||||
description = "Specify your tpot flavor [STANDARD, SENSOR, INDUSTRIAL, COLLECTOR, NEXTGEN]"
|
||||
}
|
||||
|
||||
variable "web_user" {
|
||||
default = "webuser"
|
||||
description = "Set a username for the web user"
|
||||
}
|
||||
|
||||
variable "web_password" {
|
||||
#default = "w3b$ecret"
|
||||
description = "Set a password for the web user"
|
||||
}
|
|
@ -1,3 +0,0 @@
|
|||
terraform {
|
||||
required_version = ">= 0.12"
|
||||
}
|
181
compose/customizer.py
Normal file
|
@ -0,0 +1,181 @@
|
|||
from datetime import datetime
|
||||
import yaml
|
||||
|
||||
version = \
|
||||
"""
|
||||
____ [T-Pot] _ ____ _ _ _
|
||||
/ ___| ___ _ ____ _(_) ___ ___ | __ ) _ _(_) | __| | ___ _ __
|
||||
\___ \ / _ \ '__\ \ / / |/ __/ _ \ | _ \| | | | | |/ _` |/ _ \ '__|
|
||||
___) | __/ | \ V /| | (_| __/ | |_) | |_| | | | (_| | __/ |
|
||||
|____/ \___|_| \_/ |_|\___\___| |____/ \__,_|_|_|\__,_|\___|_| v0.21
|
||||
|
||||
# This script is intended for users who want to build a customized docker-compose.yml for T-Pot.
|
||||
# T-Pot Service Builder will ask for all the docker services to be included in docker-compose.yml.
|
||||
# The configuration file will be checked for conflicting ports.
|
||||
# Port conflicts have to be resolved manually or by re-running the script and excluding the conflicting services.
|
||||
# Review the resulting docker-compose-custom.yml and adjust to your needs by (un)commenting the corresponding lines in the config.
|
||||
"""
|
||||
|
||||
header = \
|
||||
"""# T-Pot: CUSTOM EDITION
|
||||
# Generated on: {current_date}
|
||||
"""
|
||||
|
||||
config_filename = "tpot_services.yml"
|
||||
service_filename = "docker-compose-custom.yml"
|
||||
|
||||
|
||||
def load_config(filename):
|
||||
try:
|
||||
with open(filename, 'r') as file:
|
||||
config = yaml.safe_load(file)
|
||||
except:
|
||||
print_color(f"Error: {filename} not found. Exiting.", "red")
|
||||
exit()
|
||||
return config
|
||||
|
||||
|
||||
def prompt_service_include(service_name):
|
||||
while True:
|
||||
try:
|
||||
response = input(f"Include {service_name}? (y/n): ").strip().lower()
|
||||
if response in ['y', 'n']:
|
||||
return response == 'y'
|
||||
else:
|
||||
print_color("Please enter 'y' for yes or 'n' for no.", "red")
|
||||
except KeyboardInterrupt:
|
||||
print()
|
||||
print_color("Interrupted by user. Exiting.", "red")
|
||||
print()
|
||||
exit()
|
||||
|
||||
|
||||
def check_port_conflicts(selected_services):
|
||||
all_ports = {}
|
||||
conflict_ports = []
|
||||
|
||||
for service_name, config in selected_services.items():
|
||||
ports = config.get('ports', [])
|
||||
for port in ports:
|
||||
# Split the port mapping and take only the host port part
|
||||
parts = port.split(':')
|
||||
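            # "ip:host:container" -> middle element is the host port; "host:container" -> first element
            # (falls back to the second element if the container part is not purely numeric, e.g. "80/udp")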
host_port = parts[1] if len(parts) == 3 else (parts[0] if parts[1].isdigit() else parts[1])
|
||||
|
||||
# Check for port conflict and associate it with the service name
|
||||
if host_port in all_ports:
|
||||
conflict_ports.append((service_name, host_port))
|
||||
if all_ports[host_port] not in [service for service, _ in conflict_ports]:
|
||||
conflict_ports.append((all_ports[host_port], host_port))
|
||||
else:
|
||||
all_ports[host_port] = service_name
|
||||
|
||||
if conflict_ports:
|
||||
print_color("[WARNING] - Port conflict(s) detected:", "red")
|
||||
for service, port in conflict_ports:
|
||||
print_color(f"{service}: {port}", "red")
|
||||
return True
|
||||
return False
|
||||
|
||||
|
||||
|
||||
def print_color(text, color):
    colors = {
        "red": "\033[91m",
        "green": "\033[92m",
        "blue": "\033[94m",
        "magenta": "\033[95m",
        "end": "\033[0m",
    }
    print(f"{colors[color]}{text}{colors['end']}")


def enforce_dependencies(selected_services, services):
    # If snare or any tanner service is selected, enable the whole chain
    tanner_services = {'snare', 'tanner', 'tanner_redis', 'tanner_phpox', 'tanner_api'}
    if tanner_services.intersection(selected_services):
        print_color("[OK] - For Snare / Tanner to work all required services have been added to your configuration.", "green")
        for service in tanner_services:
            selected_services[service] = services[service]

    # If kibana is enabled, also enable elasticsearch
    if 'kibana' in selected_services:
        selected_services['elasticsearch'] = services['elasticsearch']
        print_color("[OK] - Kibana requires Elasticsearch which has been added to your configuration.", "green")

    # If spiderfoot is enabled, also enable nginx
    if 'spiderfoot' in selected_services:
        selected_services['nginx'] = services['nginx']
        print_color("[OK] - Spiderfoot requires Nginx which has been added to your configuration.", "green")

    # If any map service is selected, also enable elasticsearch, nginx and the
    # remaining map services (logstash is always part of the selection)
    map_services = {'map_web', 'map_redis', 'map_data'}
    if map_services.intersection(selected_services):
        print_color("[OK] - For AttackMap to work all required services have been added to your configuration.", "green")
        for service in map_services.union({'elasticsearch', 'nginx'}):
            selected_services[service] = services[service]

    # Honeytrap and glutton cannot be active at the same time; honeytrap wins
    if 'honeytrap' in selected_services and 'glutton' in selected_services:
        del selected_services['glutton']
        print_color("[OK] - Honeytrap and Glutton cannot be active at the same time. Glutton has been removed from your configuration.", "green")

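As a standalone illustration of the first rule above (selecting any Snare/Tanner component pulls in the whole chain); the empty dicts stand in for real service definitions:

```python
# Illustrative only: selecting 'snare' drags in the full Tanner chain.
services = {name: {} for name in
            ['tpotinit', 'logstash', 'snare', 'tanner',
             'tanner_redis', 'tanner_phpox', 'tanner_api', 'cowrie']}
selected = {'tpotinit': {}, 'logstash': {}, 'snare': {}}

tanner_chain = {'snare', 'tanner', 'tanner_redis', 'tanner_phpox', 'tanner_api'}
if tanner_chain.intersection(selected):
    for name in tanner_chain:
        selected[name] = services[name]

print(sorted(selected))
# ['logstash', 'snare', 'tanner', 'tanner_api', 'tanner_phpox', 'tanner_redis', 'tpotinit']
```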
def remove_unused_networks(selected_services, services, networks):
    used_networks = set()
    # Collect the networks referenced by the selected services
    for service_name in selected_services:
        service_config = services[service_name]
        if 'networks' in service_config:
            for network in service_config['networks']:
                used_networks.add(network)

    # Drop every network no selected service references
    for network in list(networks):
        if network not in used_networks:
            del networks[network]


def main():
    config = load_config(config_filename)

    # Separate services and networks
    services = config['services']
    networks = config.get('networks', {})
    selected_services = {'tpotinit': services['tpotinit'],
                         'logstash': services['logstash']}  # Always include tpotinit and logstash

    for service_name, service_config in services.items():
        if service_name not in selected_services:  # Skip the always-included services
            if prompt_service_include(service_name):
                selected_services[service_name] = service_config

    # Enforce dependencies
    enforce_dependencies(selected_services, services)

    # Remove unused networks based on the selected services
    remove_unused_networks(selected_services, services, networks)

    output_config = {
        'networks': networks,
        'services': selected_services,
    }

    current_date = datetime.now().strftime("%Y-%m-%d %H:%M:%S")

    with open(service_filename, 'w') as file:
        file.write(header.format(current_date=current_date))
        yaml.dump(output_config, file, default_flow_style=False, sort_keys=False, indent=2)

    if check_port_conflicts(selected_services):
        print_color(f"[WARNING] - Adjust the conflicting ports in {service_filename} or re-run the script and select services that do not occupy the same port(s).", "red")
    else:
        print_color(f"[OK] - Custom {service_filename} has been generated without port conflicts.", "green")
        print_color(f"Copy {service_filename} to ~/tpotce and test with: docker compose -f {service_filename} up", "blue")
        print_color("If everything works, exit with CTRL-C and replace docker-compose.yml with the new config.", "blue")


if __name__ == "__main__":
    print_color(version, "magenta")
    main()

compose/llm.yml (new file, 350 lines)
@@ -0,0 +1,350 @@
# T-Pot: LLM
networks:
  beelzebub_local:
  galah_local:
  nginx_local:
  ewsposter_local:

services:

#########################################
#### DEV
#########################################
#### T-Pot Init - Never delete this!
#########################################

# T-Pot Init Service
  tpotinit:
    container_name: tpotinit
    env_file:
      - .env
    restart: always
    stop_grace_period: 60s
    tmpfs:
      - /tmp/etc:uid=2000,gid=2000
      - /tmp/:uid=2000,gid=2000
    network_mode: "host"
    cap_add:
      - NET_ADMIN
    image: ${TPOT_REPO}/tpotinit:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    volumes:
      - ${TPOT_DOCKER_COMPOSE}:/tmp/tpot/docker-compose.yml:ro
      - ${TPOT_DATA_PATH}/blackhole:/etc/blackhole
      - ${TPOT_DATA_PATH}:/data
      - /var/run/docker.sock:/var/run/docker.sock:ro


##################
#### Honeypots
##################

# Beelzebub service
  beelzebub:
    container_name: beelzebub
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    networks:
      - beelzebub_local
    ports:
      - "22:22"
      # - "80:80"
      # - "2222:2222"
      # - "3306:3306"
      # - "8080:8080"
    image: ${TPOT_REPO}/beelzebub:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    environment:
      LLM_MODEL: ${BEELZEBUB_LLM_MODEL}
      LLM_HOST: ${BEELZEBUB_LLM_HOST}
      OLLAMA_MODEL: ${BEELZEBUB_OLLAMA_MODEL}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/beelzebub/key:/opt/beelzebub/configurations/key
      - ${TPOT_DATA_PATH}/beelzebub/log:/opt/beelzebub/configurations/log

# Galah service
  galah:
    container_name: galah
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    networks:
      - galah_local
    ports:
      - "80:80"
      - "443:443"
      - "8443:8443"
      - "8080:8080"
    image: ${TPOT_REPO}/galah:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    environment:
      LLM_PROVIDER: ${GALAH_LLM_PROVIDER}
      LLM_SERVER_URL: ${GALAH_LLM_SERVER_URL}
      LLM_MODEL: ${GALAH_LLM_MODEL}
      # LLM_TEMPERATURE: ${GALAH_LLM_TEMPERATURE}
      # LLM_API_KEY: ${GALAH_LLM_API_KEY}
      # LLM_CLOUD_LOCATION: ${GALAH_LLM_CLOUD_LOCATION}
      # LLM_CLOUD_PROJECT: ${GALAH_LLM_CLOUD_PROJECT}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/galah/cache:/opt/galah/config/cache
      - ${TPOT_DATA_PATH}/galah/cert:/opt/galah/config/cert
      - ${TPOT_DATA_PATH}/galah/log:/opt/galah/log


##################
#### NSM
##################

# Fatt service
  fatt:
    container_name: fatt
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    network_mode: "host"
    cap_add:
      - NET_ADMIN
      - SYS_NICE
      - NET_RAW
    image: ${TPOT_REPO}/fatt:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    volumes:
      - ${TPOT_DATA_PATH}/fatt/log:/opt/fatt/log

# P0f service
  p0f:
    container_name: p0f
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    network_mode: "host"
    image: ${TPOT_REPO}/p0f:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/p0f/log:/var/log/p0f

# Suricata service
  suricata:
    container_name: suricata
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    environment:
      - OINKCODE=${OINKCODE:-OPEN} # Default to OPEN if unset or NULL (value provided by T-Pot .env)
      # Loading external Rules from URL
      # - FROMURL="https://username:password@yoururl.com|https://username:password@otherurl.com"
    network_mode: "host"
    cap_add:
      - NET_ADMIN
      - SYS_NICE
      - NET_RAW
    image: ${TPOT_REPO}/suricata:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    volumes:
      - ${TPOT_DATA_PATH}/suricata/log:/var/log/suricata


##################
#### Tools
##################

#### ELK
## Elasticsearch service
  elasticsearch:
    container_name: elasticsearch
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    networks:
      - nginx_local
    environment:
      - bootstrap.memory_lock=true
      - ES_JAVA_OPTS=-Xms2048m -Xmx2048m
      - ES_TMPDIR=/tmp
    cap_add:
      - IPC_LOCK
    ulimits:
      memlock:
        soft: -1
        hard: -1
      nofile:
        soft: 65536
        hard: 65536
    mem_limit: 4g
    ports:
      - "127.0.0.1:64298:9200"
    image: ${TPOT_REPO}/elasticsearch:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    volumes:
      - ${TPOT_DATA_PATH}:/data

## Kibana service
  kibana:
    container_name: kibana
    restart: always
    depends_on:
      elasticsearch:
        condition: service_healthy
    networks:
      - nginx_local
    mem_limit: 1g
    ports:
      - "127.0.0.1:64296:5601"
    image: ${TPOT_REPO}/kibana:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}

## Logstash service
  logstash:
    container_name: logstash
    restart: always
    depends_on:
      elasticsearch:
        condition: service_healthy
    networks:
      - nginx_local
    environment:
      - LS_JAVA_OPTS=-Xms1024m -Xmx1024m
      - TPOT_TYPE=${TPOT_TYPE:-HIVE}
      - TPOT_HIVE_USER=${TPOT_HIVE_USER}
      - TPOT_HIVE_IP=${TPOT_HIVE_IP}
      - LS_SSL_VERIFICATION=${LS_SSL_VERIFICATION:-full}
    ports:
      - "127.0.0.1:64305:64305"
    mem_limit: 2g
    image: ${TPOT_REPO}/logstash:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    volumes:
      - ${TPOT_DATA_PATH}:/data

## Map Redis Service
  map_redis:
    container_name: map_redis
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    networks:
      - nginx_local
    stop_signal: SIGKILL
    tty: true
    image: ${TPOT_REPO}/redis:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true

## Map Web Service
  map_web:
    container_name: map_web
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    networks:
      - nginx_local
    environment:
      - MAP_COMMAND=AttackMapServer.py
    stop_signal: SIGKILL
    tty: true
    ports:
      - "127.0.0.1:64299:64299"
    image: ${TPOT_REPO}/map:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}

## Map Data Service
  map_data:
    container_name: map_data
    restart: always
    depends_on:
      elasticsearch:
        condition: service_healthy
    networks:
      - nginx_local
    environment:
      - MAP_COMMAND=DataServer_v2.py
      - TPOT_ATTACKMAP_TEXT=${TPOT_ATTACKMAP_TEXT}
      - TZ=${TPOT_ATTACKMAP_TEXT_TIMEZONE}
    stop_signal: SIGKILL
    tty: true
    image: ${TPOT_REPO}/map:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
#### /ELK

# Ewsposter service
  ewsposter:
    container_name: ewsposter
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    networks:
      - ewsposter_local
    environment:
      - EWS_HPFEEDS_ENABLE=false
      - EWS_HPFEEDS_HOST=host
      - EWS_HPFEEDS_PORT=port
      - EWS_HPFEEDS_CHANNELS=channels
      - EWS_HPFEEDS_IDENT=user
      - EWS_HPFEEDS_SECRET=secret
      - EWS_HPFEEDS_TLSCERT=false
      - EWS_HPFEEDS_FORMAT=json
    image: ${TPOT_REPO}/ewsposter:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    volumes:
      - ${TPOT_DATA_PATH}:/data
      - ${TPOT_DATA_PATH}/ews/conf/ews.ip:/opt/ewsposter/ews.ip

# Nginx service
  nginx:
    container_name: nginx
    restart: always
    environment:
      - TPOT_OSTYPE=${TPOT_OSTYPE}
    depends_on:
      tpotinit:
        condition: service_healthy
    tmpfs:
      - /var/tmp/nginx/client_body
      - /var/tmp/nginx/proxy
      - /var/tmp/nginx/fastcgi
      - /var/tmp/nginx/uwsgi
      - /var/tmp/nginx/scgi
      - /run
      - /var/lib/nginx/tmp:uid=100,gid=82
    networks:
      - nginx_local
    ports:
      - "64297:64297"
      - "64294:64294"
    image: ${TPOT_REPO}/nginx:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/nginx/cert/:/etc/nginx/cert/:ro
      - ${TPOT_DATA_PATH}/nginx/conf/nginxpasswd:/etc/nginx/nginxpasswd:ro
      - ${TPOT_DATA_PATH}/nginx/conf/lswebpasswd:/etc/nginx/lswebpasswd:ro
      - ${TPOT_DATA_PATH}/nginx/log/:/var/log/nginx/

# Spiderfoot service
  spiderfoot:
    container_name: spiderfoot
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    networks:
      - nginx_local
    ports:
      - "127.0.0.1:64303:8080"
    image: ${TPOT_REPO}/spiderfoot:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    volumes:
      - ${TPOT_DATA_PATH}/spiderfoot:/home/spiderfoot/.spiderfoot
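These profiles rely on Docker Compose variable interpolation throughout: `${VAR}` is read from the `.env` file loaded by `tpotinit`, and `${VAR:-default}` (as used for `OINKCODE`, `TPOT_TYPE` and `LS_SSL_VERIFICATION`) falls back to the default when the variable is unset or empty. A rough Python restatement of that fallback rule, for illustration only:

```python
import os

def interpolate(name: str, default: str = "") -> str:
    # compose-style ${NAME:-default}: the default applies when NAME
    # is unset *or* set to the empty string
    value = os.environ.get(name)
    return default if value in (None, "") else value

print(interpolate("OINKCODE", "OPEN"))   # "OPEN" unless OINKCODE is set and non-empty
print(interpolate("TPOT_TYPE", "HIVE"))  # "HIVE" unless TPOT_TYPE is set and non-empty
```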

compose/mac_win.yml (new file, 734 lines)
@@ -0,0 +1,734 @@
# T-Pot: MAC_WIN
networks:
  tpotinit_local:
  adbhoney_local:
  ciscoasa_local:
  cowrie_local:
  dicompot_local:
  dionaea_local:
  elasticpot_local:
  h0neytr4p_local:
  heralding_local:
  honeyaml_local:
  ipphoney_local:
  mailoney_local:
  medpot_local:
  miniprint_local:
  redishoneypot_local:
  sentrypeer_local:
  suricata_local:
  tanner_local:
  wordpot_local:
  nginx_local:
  ewsposter_local:

services:

########################################
#### DEV
########################################
#### T-Pot Init - Never delete this!
########################################

# T-Pot Init Service
  tpotinit:
    container_name: tpotinit
    env_file:
      - .env
    restart: always
    stop_grace_period: 60s
    tmpfs:
      - /tmp/etc:uid=2000,gid=2000
      - /tmp/:uid=2000,gid=2000
    networks:
      - tpotinit_local
    image: ${TPOT_REPO}/tpotinit:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    volumes:
      - ${TPOT_DOCKER_COMPOSE}:/tmp/tpot/docker-compose.yml:ro
      - ${TPOT_DATA_PATH}/blackhole:/etc/blackhole
      - ${TPOT_DATA_PATH}:/data
      - /var/run/docker.sock:/var/run/docker.sock:ro


##################
#### Honeypots
##################

# Adbhoney service
  adbhoney:
    container_name: adbhoney
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    networks:
      - adbhoney_local
    ports:
      - "5555:5555"
    image: ${TPOT_REPO}/adbhoney:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/adbhoney/log:/opt/adbhoney/log
      - ${TPOT_DATA_PATH}/adbhoney/downloads:/opt/adbhoney/dl

# Ciscoasa service
  ciscoasa:
    container_name: ciscoasa
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    tmpfs:
      - /tmp/ciscoasa:uid=2000,gid=2000
    networks:
      - ciscoasa_local
    ports:
      - "5000:5000/udp"
      - "8443:8443"
    image: ${TPOT_REPO}/ciscoasa:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/ciscoasa/log:/var/log/ciscoasa

# Cowrie service
  cowrie:
    container_name: cowrie
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    tmpfs:
      - /tmp/cowrie:uid=2000,gid=2000
      - /tmp/cowrie/data:uid=2000,gid=2000
    networks:
      - cowrie_local
    ports:
      - "22:22"
      - "23:23"
    image: ${TPOT_REPO}/cowrie:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/cowrie/downloads:/home/cowrie/cowrie/dl
      - ${TPOT_DATA_PATH}/cowrie/keys:/home/cowrie/cowrie/etc
      - ${TPOT_DATA_PATH}/cowrie/log:/home/cowrie/cowrie/log
      - ${TPOT_DATA_PATH}/cowrie/log/tty:/home/cowrie/cowrie/log/tty

# Dicompot service
# Get the Horos Client for testing: https://horosproject.org/
# Get Dicom images (CC BY 3.0): https://www.cancerimagingarchive.net/collections/
# Put images (which must be in Dicom DCM format or it will not work!) into /data/dicompot/images
  dicompot:
    container_name: dicompot
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    networks:
      - dicompot_local
    ports:
      - "104:11112"
      - "11112:11112"
    image: ${TPOT_REPO}/dicompot:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/dicompot/log:/var/log/dicompot
      # - ${TPOT_DATA_PATH}/dicompot/images:/opt/dicompot/images

# Dionaea service
  dionaea:
    container_name: dionaea
    stdin_open: true
    tty: true
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    networks:
      - dionaea_local
    ports:
      - "20:20"
      - "21:21"
      - "42:42"
      - "69:69/udp"
      - "81:81"
      - "135:135"
      # - "443:443"
      # - "445:445"
      - "1433:1433"
      - "1723:1723"
      - "1883:1883"
      - "3306:3306"
      # - "5060:5060"
      # - "5060:5060/udp"
      # - "5061:5061"
      - "27017:27017"
    image: ${TPOT_REPO}/dionaea:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/dionaea/roots/ftp:/opt/dionaea/var/dionaea/roots/ftp
      - ${TPOT_DATA_PATH}/dionaea/roots/tftp:/opt/dionaea/var/dionaea/roots/tftp
      - ${TPOT_DATA_PATH}/dionaea/roots/www:/opt/dionaea/var/dionaea/roots/www
      - ${TPOT_DATA_PATH}/dionaea/roots/upnp:/opt/dionaea/var/dionaea/roots/upnp
      - ${TPOT_DATA_PATH}/dionaea:/opt/dionaea/var/dionaea
      - ${TPOT_DATA_PATH}/dionaea/binaries:/opt/dionaea/var/dionaea/binaries
      - ${TPOT_DATA_PATH}/dionaea/log:/opt/dionaea/var/log
      - ${TPOT_DATA_PATH}/dionaea/rtp:/opt/dionaea/var/dionaea/rtp

# ElasticPot service
  elasticpot:
    container_name: elasticpot
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    networks:
      - elasticpot_local
    ports:
      - "9200:9200"
    image: ${TPOT_REPO}/elasticpot:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/elasticpot/log:/opt/elasticpot/log

# H0neytr4p service
  h0neytr4p:
    container_name: h0neytr4p
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    networks:
      - h0neytr4p_local
    ports:
      - "443:443"
      # - "80:80"
    image: ${TPOT_REPO}/h0neytr4p:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/h0neytr4p/log/:/opt/h0neytr4p/log/
      - ${TPOT_DATA_PATH}/h0neytr4p/payloads/:/data/h0neytr4p/payloads/

# Heralding service
  heralding:
    container_name: heralding
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    tmpfs:
      - /tmp/heralding:uid=2000,gid=2000
    networks:
      - heralding_local
    ports:
      # - "21:21"
      # - "22:22"
      # - "23:23"
      # - "25:25"
      # - "80:80"
      - "110:110"
      - "143:143"
      # - "443:443"
      - "465:465"
      - "993:993"
      - "995:995"
      # - "3306:3306"
      # - "3389:3389"
      - "1080:1080"
      - "5432:5432"
      - "5900:5900"
    image: ${TPOT_REPO}/heralding:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/heralding/log:/var/log/heralding

# Honeyaml service
  honeyaml:
    container_name: honeyaml
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    networks:
      - honeyaml_local
    ports:
      - "3000:8080"
    image: ${TPOT_REPO}/honeyaml:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/honeyaml/log:/opt/honeyaml/log/

# Ipphoney service
  ipphoney:
    container_name: ipphoney
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    networks:
      - ipphoney_local
    ports:
      - "631:631"
    image: ${TPOT_REPO}/ipphoney:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/ipphoney/log:/opt/ipphoney/log

# Mailoney service
  mailoney:
    container_name: mailoney
    stdin_open: true
    tty: true
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    networks:
      - mailoney_local
    ports:
      - "25:25"
      - "587:25"
    image: ${TPOT_REPO}/mailoney:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/mailoney/log:/opt/mailoney/logs

# Medpot service
  medpot:
    container_name: medpot
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    networks:
      - medpot_local
    ports:
      - "2575:2575"
    image: ${TPOT_REPO}/medpot:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/medpot/log/:/var/log/medpot

# Miniprint service
  miniprint:
    container_name: miniprint
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    networks:
      - miniprint_local
    ports:
      - "9100:9100"
    image: ${TPOT_REPO}/miniprint:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/miniprint/log/:/opt/miniprint/log/
      - ${TPOT_DATA_PATH}/miniprint/uploads/:/opt/miniprint/uploads/

# Redishoneypot service
  redishoneypot:
    container_name: redishoneypot
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    networks:
      - redishoneypot_local
    ports:
      - "6379:6379"
    image: ${TPOT_REPO}/redishoneypot:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/redishoneypot/log:/var/log/redishoneypot

# SentryPeer service
  sentrypeer:
    container_name: sentrypeer
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    # environment:
    #   - SENTRYPEER_PEER_TO_PEER=1
    networks:
      - sentrypeer_local
    ports:
      # - "4222:4222/udp"
      - "5060:5060/tcp"
      - "5060:5060/udp"
      # - "127.0.0.1:8082:8082"
    image: ${TPOT_REPO}/sentrypeer:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/sentrypeer/log:/var/log/sentrypeer

#### Snare / Tanner
## Tanner Redis Service
  tanner_redis:
    container_name: tanner_redis
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    tty: true
    networks:
      - tanner_local
    image: ${TPOT_REPO}/redis:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true

## PHP Sandbox service
  tanner_phpox:
    container_name: tanner_phpox
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    tty: true
    networks:
      - tanner_local
    image: ${TPOT_REPO}/phpox:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true

## Tanner API Service
  tanner_api:
    container_name: tanner_api
    restart: always
    depends_on:
      - tanner_redis
    tmpfs:
      - /tmp/tanner:uid=2000,gid=2000
    tty: true
    networks:
      - tanner_local
    image: ${TPOT_REPO}/tanner:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/tanner/log:/var/log/tanner
    command: tannerapi

## Tanner Service
  tanner:
    container_name: tanner
    restart: always
    depends_on:
      - tanner_api
      - tanner_phpox
    tmpfs:
      - /tmp/tanner:uid=2000,gid=2000
    tty: true
    networks:
      - tanner_local
    image: ${TPOT_REPO}/tanner:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    command: tanner
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/tanner/log:/var/log/tanner
      - ${TPOT_DATA_PATH}/tanner/files:/opt/tanner/files

## Snare Service
  snare:
    container_name: snare
    restart: always
    depends_on:
      - tanner
    tty: true
    networks:
      - tanner_local
    ports:
      - "80:80"
    image: ${TPOT_REPO}/snare:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}

# Wordpot service
  wordpot:
    container_name: wordpot
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    networks:
      - wordpot_local
    ports:
      - "8080:80"
    image: ${TPOT_REPO}/wordpot:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/wordpot/log:/opt/wordpot/logs/


##################
#### NSM
##################

# Fatt service
  fatt:
    container_name: fatt
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    network_mode: "host"
    cap_add:
      - NET_ADMIN
      - SYS_NICE
      - NET_RAW
    image: ${TPOT_REPO}/fatt:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    volumes:
      - ${TPOT_DATA_PATH}/fatt/log:/opt/fatt/log

# P0f service
  p0f:
    container_name: p0f
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    network_mode: "host"
    cap_add:
      - NET_ADMIN
      - SYS_NICE
      - NET_RAW
    image: ${TPOT_REPO}/p0f:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/p0f/log:/var/log/p0f

# Suricata service
  suricata:
    container_name: suricata
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    networks:
      - suricata_local
    cap_add:
      - NET_ADMIN
      - SYS_NICE
      - NET_RAW
    environment:
      - OINKCODE=${OINKCODE:-OPEN} # Default to OPEN if unset or NULL (value provided by T-Pot .env)
      # Loading external Rules from URL
      # - FROMURL="https://username:password@yoururl.com|https://username:password@otherurl.com"
    image: ${TPOT_REPO}/suricata:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    volumes:
      - ${TPOT_DATA_PATH}/suricata/log:/var/log/suricata


##################
#### Tools
##################

#### ELK
## Elasticsearch service
  elasticsearch:
    container_name: elasticsearch
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    environment:
      - bootstrap.memory_lock=true
      - ES_JAVA_OPTS=-Xms2048m -Xmx2048m
      - ES_TMPDIR=/tmp
    networks:
      - nginx_local
    cap_add:
      - IPC_LOCK
    ulimits:
      memlock:
        soft: -1
        hard: -1
      nofile:
        soft: 65536
        hard: 65536
    mem_limit: 4g
    ports:
      - "127.0.0.1:64298:9200"
    image: ${TPOT_REPO}/elasticsearch:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    volumes:
      - ${TPOT_DATA_PATH}:/data

## Kibana service
  kibana:
    container_name: kibana
    restart: always
    depends_on:
      elasticsearch:
        condition: service_healthy
    networks:
      - nginx_local
    mem_limit: 1g
    ports:
      - "127.0.0.1:64296:5601"
    image: ${TPOT_REPO}/kibana:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}

## Logstash service
  logstash:
    container_name: logstash
    restart: always
    depends_on:
      elasticsearch:
        condition: service_healthy
    networks:
      - nginx_local
    environment:
      - LS_JAVA_OPTS=-Xms1024m -Xmx1024m
      - TPOT_TYPE=${TPOT_TYPE:-HIVE}
      - TPOT_HIVE_USER=${TPOT_HIVE_USER}
      - TPOT_HIVE_IP=${TPOT_HIVE_IP}
      - LS_SSL_VERIFICATION=${LS_SSL_VERIFICATION:-full}
    ports:
      - "127.0.0.1:64305:64305"
    mem_limit: 2g
    image: ${TPOT_REPO}/logstash:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    volumes:
      - ${TPOT_DATA_PATH}:/data

## Map Redis Service
  map_redis:
    container_name: map_redis
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    networks:
      - nginx_local
    stop_signal: SIGKILL
    tty: true
    image: ${TPOT_REPO}/redis:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true

## Map Web Service
  map_web:
    container_name: map_web
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    networks:
      - nginx_local
    environment:
      - MAP_COMMAND=AttackMapServer.py
    stop_signal: SIGKILL
    tty: true
    ports:
      - "127.0.0.1:64299:64299"
    image: ${TPOT_REPO}/map:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}

## Map Data Service
  map_data:
    container_name: map_data
    restart: always
    depends_on:
      elasticsearch:
        condition: service_healthy
    networks:
      - nginx_local
    environment:
      - MAP_COMMAND=DataServer_v2.py
      - TPOT_ATTACKMAP_TEXT=${TPOT_ATTACKMAP_TEXT}
      - TZ=${TPOT_ATTACKMAP_TEXT_TIMEZONE}
    stop_signal: SIGKILL
    tty: true
    image: ${TPOT_REPO}/map:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
#### /ELK

# Ewsposter service
  ewsposter:
    container_name: ewsposter
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    networks:
      - ewsposter_local
    environment:
      - EWS_HPFEEDS_ENABLE=false
      - EWS_HPFEEDS_HOST=host
      - EWS_HPFEEDS_PORT=port
      - EWS_HPFEEDS_CHANNELS=channels
      - EWS_HPFEEDS_IDENT=user
      - EWS_HPFEEDS_SECRET=secret
      - EWS_HPFEEDS_TLSCERT=false
      - EWS_HPFEEDS_FORMAT=json
    image: ${TPOT_REPO}/ewsposter:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    volumes:
      - ${TPOT_DATA_PATH}:/data
      - ${TPOT_DATA_PATH}/ews/conf/ews.ip:/opt/ewsposter/ews.ip

# Nginx service
  nginx:
    container_name: nginx
    restart: always
    environment:
      - TPOT_OSTYPE=${TPOT_OSTYPE}
    depends_on:
      tpotinit:
        condition: service_healthy
    tmpfs:
      - /var/tmp/nginx/client_body
      - /var/tmp/nginx/proxy
      - /var/tmp/nginx/fastcgi
      - /var/tmp/nginx/uwsgi
      - /var/tmp/nginx/scgi
      - /run
      - /var/lib/nginx/tmp:uid=100,gid=82
    networks:
      - nginx_local
    ports:
      - "64297:64297"
    image: ${TPOT_REPO}/nginx:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/nginx/cert/:/etc/nginx/cert/:ro
      - ${TPOT_DATA_PATH}/nginx/conf/nginxpasswd:/etc/nginx/nginxpasswd:ro
      - ${TPOT_DATA_PATH}/nginx/log/:/var/log/nginx/

# Spiderfoot service
  spiderfoot:
    container_name: spiderfoot
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    networks:
      - nginx_local
    ports:
      - "127.0.0.1:64303:8080"
    image: ${TPOT_REPO}/spiderfoot:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    volumes:
      - ${TPOT_DATA_PATH}/spiderfoot:/home/spiderfoot/.spiderfoot
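Profiles like this one claim dozens of host ports, which is exactly what `check_port_conflicts()` in `compose/customizer.py` guards against. A standalone spot-check in the same spirit can be run against any of these profiles; the file path below is an assumption, adjust to wherever the compose files live:

```python
# Illustrative spot-check: count host-side ports claimed by one profile.
import yaml
from collections import Counter

with open("compose/mac_win.yml") as f:  # hypothetical path
    compose = yaml.safe_load(f)

hosts = Counter()
for name, svc in compose.get("services", {}).items():
    for mapping in svc.get("ports", []) or []:
        proto = "/" + mapping.split("/")[1] if "/" in mapping else ""
        parts = mapping.split("/")[0].split(":")
        hosts[(parts[1] if len(parts) == 3 else parts[0]) + proto] += 1

print([port for port, n in hosts.items() if n > 1])  # duplicated host ports, if any
```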

compose/mini.yml (new file, 550 lines)
@@ -0,0 +1,550 @@
# T-Pot: MINI
networks:
  adbhoney_local:
  ciscoasa_local:
  conpot_local_IEC104:
  conpot_local_guardian_ast:
  conpot_local_ipmi:
  conpot_local_kamstrup_382:
  dicompot_local:
  honeypots_local:
  medpot_local:
  nginx_local:
  ewsposter_local:

services:

#########################################
#### DEV
#########################################
#### T-Pot Init - Never delete this!
#########################################

# T-Pot Init Service
  tpotinit:
    container_name: tpotinit
    env_file:
      - .env
    restart: always
    stop_grace_period: 60s
    tmpfs:
      - /tmp/etc:uid=2000,gid=2000
      - /tmp/:uid=2000,gid=2000
    network_mode: "host"
    cap_add:
      - NET_ADMIN
    image: ${TPOT_REPO}/tpotinit:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    volumes:
      - ${TPOT_DOCKER_COMPOSE}:/tmp/tpot/docker-compose.yml:ro
      - ${TPOT_DATA_PATH}/blackhole:/etc/blackhole
      - ${TPOT_DATA_PATH}:/data
      - /var/run/docker.sock:/var/run/docker.sock:ro


##################
#### Honeypots
##################

# Adbhoney service
  adbhoney:
    container_name: adbhoney
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    networks:
      - adbhoney_local
    ports:
      - "5555:5555"
    image: ${TPOT_REPO}/adbhoney:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/adbhoney/log:/opt/adbhoney/log
      - ${TPOT_DATA_PATH}/adbhoney/downloads:/opt/adbhoney/dl

# Ciscoasa service
  ciscoasa:
    container_name: ciscoasa
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    tmpfs:
      - /tmp/ciscoasa:uid=2000,gid=2000
    networks:
      - ciscoasa_local
    ports:
      - "5000:5000/udp"
      - "8443:8443"
    image: ${TPOT_REPO}/ciscoasa:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/ciscoasa/log:/var/log/ciscoasa

# Conpot IEC104 service
  conpot_IEC104:
    container_name: conpot_iec104
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    environment:
      - CONPOT_CONFIG=/etc/conpot/conpot.cfg
      - CONPOT_JSON_LOG=/var/log/conpot/conpot_IEC104.json
      - CONPOT_LOG=/var/log/conpot/conpot_IEC104.log
      - CONPOT_TEMPLATE=IEC104
      - CONPOT_TMP=/tmp/conpot
    tmpfs:
      - /tmp/conpot:uid=2000,gid=2000
    networks:
      - conpot_local_IEC104
    ports:
      - "161:161/udp"
      - "2404:2404"
    image: ${TPOT_REPO}/conpot:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/conpot/log:/var/log/conpot

# Conpot guardian_ast service
  conpot_guardian_ast:
    container_name: conpot_guardian_ast
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    environment:
      - CONPOT_CONFIG=/etc/conpot/conpot.cfg
      - CONPOT_JSON_LOG=/var/log/conpot/conpot_guardian_ast.json
      - CONPOT_LOG=/var/log/conpot/conpot_guardian_ast.log
      - CONPOT_TEMPLATE=guardian_ast
      - CONPOT_TMP=/tmp/conpot
    tmpfs:
      - /tmp/conpot:uid=2000,gid=2000
    networks:
      - conpot_local_guardian_ast
    ports:
      - "10001:10001"
    image: ${TPOT_REPO}/conpot:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/conpot/log:/var/log/conpot

# Conpot ipmi
  conpot_ipmi:
    container_name: conpot_ipmi
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    environment:
      - CONPOT_CONFIG=/etc/conpot/conpot.cfg
      - CONPOT_JSON_LOG=/var/log/conpot/conpot_ipmi.json
      - CONPOT_LOG=/var/log/conpot/conpot_ipmi.log
      - CONPOT_TEMPLATE=ipmi
      - CONPOT_TMP=/tmp/conpot
    tmpfs:
      - /tmp/conpot:uid=2000,gid=2000
    networks:
      - conpot_local_ipmi
    ports:
      - "623:623/udp"
    image: ${TPOT_REPO}/conpot:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/conpot/log:/var/log/conpot

# Conpot kamstrup_382
  conpot_kamstrup_382:
    container_name: conpot_kamstrup_382
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    environment:
      - CONPOT_CONFIG=/etc/conpot/conpot.cfg
      - CONPOT_JSON_LOG=/var/log/conpot/conpot_kamstrup_382.json
      - CONPOT_LOG=/var/log/conpot/conpot_kamstrup_382.log
      - CONPOT_TEMPLATE=kamstrup_382
      - CONPOT_TMP=/tmp/conpot
    tmpfs:
      - /tmp/conpot:uid=2000,gid=2000
    networks:
      - conpot_local_kamstrup_382
    ports:
      - "1025:1025"
      - "50100:50100"
    image: ${TPOT_REPO}/conpot:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/conpot/log:/var/log/conpot

# Dicompot service
# Get the Horos Client for testing: https://horosproject.org/
# Get Dicom images (CC BY 3.0): https://www.cancerimagingarchive.net/collections/
# Put images (which must be in Dicom DCM format or it will not work!) into /data/dicompot/images
  dicompot:
    container_name: dicompot
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    networks:
      - dicompot_local
    ports:
      - "104:11112"
      - "11112:11112"
    image: ${TPOT_REPO}/dicompot:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/dicompot/log:/var/log/dicompot
      # - ${TPOT_DATA_PATH}/dicompot/images:/opt/dicompot/images

# Honeypots service
  honeypots:
    container_name: honeypots
    stdin_open: true
    tty: true
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    tmpfs:
      - /tmp:uid=2000,gid=2000
    networks:
      - honeypots_local
    ports:
      - "21:21"
      - "22:22"
      - "23:23"
      - "25:25"
      - "53:53"
      - "67:67/udp"
      - "80:80"
      - "110:110"
      - "123:123"
      - "143:143"
      - "161:161"
      - "389:389"
      - "443:443"
      - "445:445"
      - "631:631"
      - "1080:1080"
      - "1433:1433"
      - "1521:1521"
      - "3306:3306"
      - "3389:3389"
      - "5060:5060/tcp"
      - "5060:5060/udp"
      - "5432:5432"
      - "5900:5900"
      - "6379:6379"
      - "6667:6667"
      - "8080:8080"
      - "9100:9100"
      - "9200:9200"
      - "11211:11211"
    image: ${TPOT_REPO}/honeypots:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/honeypots/log:/var/log/honeypots

# Honeytrap service
  honeytrap:
    container_name: honeytrap
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    tmpfs:
      - /tmp/honeytrap:uid=2000,gid=2000
    network_mode: "host"
    cap_add:
      - NET_ADMIN
    image: ${TPOT_REPO}/honeytrap:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/honeytrap/attacks:/opt/honeytrap/var/attacks
      - ${TPOT_DATA_PATH}/honeytrap/downloads:/opt/honeytrap/var/downloads
      - ${TPOT_DATA_PATH}/honeytrap/log:/opt/honeytrap/var/log

# Medpot service
  medpot:
    container_name: medpot
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    networks:
      - medpot_local
    ports:
      - "2575:2575"
    image: ${TPOT_REPO}/medpot:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/medpot/log/:/var/log/medpot


##################
#### NSM
##################

# Fatt service
  fatt:
    container_name: fatt
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    network_mode: "host"
    cap_add:
      - NET_ADMIN
      - SYS_NICE
      - NET_RAW
    image: ${TPOT_REPO}/fatt:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    volumes:
      - ${TPOT_DATA_PATH}/fatt/log:/opt/fatt/log

# P0f service
  p0f:
    container_name: p0f
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    network_mode: "host"
    image: ${TPOT_REPO}/p0f:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/p0f/log:/var/log/p0f

# Suricata service
  suricata:
    container_name: suricata
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    environment:
      - OINKCODE=${OINKCODE:-OPEN} # Default to OPEN if unset or NULL (value provided by T-Pot .env)
      # Loading external Rules from URL
      # - FROMURL="https://username:password@yoururl.com|https://username:password@otherurl.com"
    network_mode: "host"
    cap_add:
      - NET_ADMIN
      - SYS_NICE
      - NET_RAW
    image: ${TPOT_REPO}/suricata:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    volumes:
      - ${TPOT_DATA_PATH}/suricata/log:/var/log/suricata


##################
#### Tools
##################

#### ELK
## Elasticsearch service
  elasticsearch:
    container_name: elasticsearch
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    networks:
      - nginx_local
    environment:
      - bootstrap.memory_lock=true
      - ES_JAVA_OPTS=-Xms2048m -Xmx2048m
      - ES_TMPDIR=/tmp
    cap_add:
      - IPC_LOCK
    ulimits:
      memlock:
        soft: -1
        hard: -1
      nofile:
        soft: 65536
        hard: 65536
    mem_limit: 4g
    ports:
      - "127.0.0.1:64298:9200"
    image: ${TPOT_REPO}/elasticsearch:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    volumes:
      - ${TPOT_DATA_PATH}:/data

## Kibana service
  kibana:
    container_name: kibana
    restart: always
    depends_on:
      elasticsearch:
        condition: service_healthy
    networks:
      - nginx_local
    mem_limit: 1g
    ports:
      - "127.0.0.1:64296:5601"
    image: ${TPOT_REPO}/kibana:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}

## Logstash service
  logstash:
    container_name: logstash
    restart: always
    depends_on:
      elasticsearch:
        condition: service_healthy
    networks:
      - nginx_local
    environment:
      - LS_JAVA_OPTS=-Xms1024m -Xmx1024m
      - TPOT_TYPE=${TPOT_TYPE:-HIVE}
      - TPOT_HIVE_USER=${TPOT_HIVE_USER}
      - TPOT_HIVE_IP=${TPOT_HIVE_IP}
      - LS_SSL_VERIFICATION=${LS_SSL_VERIFICATION:-full}
    ports:
      - "127.0.0.1:64305:64305"
    mem_limit: 2g
    image: ${TPOT_REPO}/logstash:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    volumes:
      - ${TPOT_DATA_PATH}:/data

## Map Redis Service
  map_redis:
    container_name: map_redis
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    networks:
      - nginx_local
    stop_signal: SIGKILL
    tty: true
    image: ${TPOT_REPO}/redis:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true

## Map Web Service
  map_web:
    container_name: map_web
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    networks:
      - nginx_local
    environment:
      - MAP_COMMAND=AttackMapServer.py
    stop_signal: SIGKILL
    tty: true
    ports:
      - "127.0.0.1:64299:64299"
    image: ${TPOT_REPO}/map:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}

## Map Data Service
  map_data:
    container_name: map_data
    restart: always
    depends_on:
      elasticsearch:
        condition: service_healthy
    networks:
      - nginx_local
    environment:
      - MAP_COMMAND=DataServer_v2.py
      - TPOT_ATTACKMAP_TEXT=${TPOT_ATTACKMAP_TEXT}
      - TZ=${TPOT_ATTACKMAP_TEXT_TIMEZONE}
    stop_signal: SIGKILL
    tty: true
    image: ${TPOT_REPO}/map:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
#### /ELK

# Ewsposter service
  ewsposter:
    container_name: ewsposter
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    networks:
      - ewsposter_local
    environment:
      - EWS_HPFEEDS_ENABLE=false
      - EWS_HPFEEDS_HOST=host
      - EWS_HPFEEDS_PORT=port
      - EWS_HPFEEDS_CHANNELS=channels
      - EWS_HPFEEDS_IDENT=user
      - EWS_HPFEEDS_SECRET=secret
      - EWS_HPFEEDS_TLSCERT=false
      - EWS_HPFEEDS_FORMAT=json
    image: ${TPOT_REPO}/ewsposter:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    volumes:
      - ${TPOT_DATA_PATH}:/data
      - ${TPOT_DATA_PATH}/ews/conf/ews.ip:/opt/ewsposter/ews.ip

# Nginx service
  nginx:
    container_name: nginx
    restart: always
    environment:
      - TPOT_OSTYPE=${TPOT_OSTYPE}
    depends_on:
      tpotinit:
        condition: service_healthy
    tmpfs:
      - /var/tmp/nginx/client_body
      - /var/tmp/nginx/proxy
      - /var/tmp/nginx/fastcgi
      - /var/tmp/nginx/uwsgi
      - /var/tmp/nginx/scgi
      - /run
      - /var/lib/nginx/tmp:uid=100,gid=82
    networks:
      - nginx_local
    ports:
      - "64297:64297"
      - "64294:64294"
    image: ${TPOT_REPO}/nginx:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/nginx/cert/:/etc/nginx/cert/:ro
      - ${TPOT_DATA_PATH}/nginx/conf/nginxpasswd:/etc/nginx/nginxpasswd:ro
      - ${TPOT_DATA_PATH}/nginx/conf/lswebpasswd:/etc/nginx/lswebpasswd:ro
      - ${TPOT_DATA_PATH}/nginx/log/:/var/log/nginx/

# Spiderfoot service
  spiderfoot:
    container_name: spiderfoot
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    networks:
      - nginx_local
    ports:
      - "127.0.0.1:64303:8080"
    image: ${TPOT_REPO}/spiderfoot:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    volumes:
      - ${TPOT_DATA_PATH}/spiderfoot:/home/spiderfoot/.spiderfoot
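When a profile like this one is fed through the customizer above, any network not referenced by a selected service is pruned from the output; a standalone restatement of that rule, mirroring `remove_unused_networks()`:

```python
# Illustrative only: prune networks no selected service references.
networks = {'adbhoney_local': None, 'ciscoasa_local': None, 'nginx_local': None}
selected_services = {
    'adbhoney': {'networks': ['adbhoney_local']},
    'nginx': {'networks': ['nginx_local']},
}

used = {net for svc in selected_services.values() for net in svc.get('networks', [])}
for net in list(networks):
    if net not in used:
        del networks[net]

print(sorted(networks))  # ['adbhoney_local', 'nginx_local']
```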

compose/mobile.yml (new file, 664 lines)
@@ -0,0 +1,664 @@
# T-Pot: MOBILE
|
||||
# Note: This docker compose file has been adjusted to limit the number of tools, services and honeypots to run
|
||||
# T-Pot on a Raspberry Pi 4 (8GB of RAM).
|
||||
# The standard docker compose file should work mostly fine (depending on traffic) if you do not enable a
|
||||
# desktop environment such as LXDE and meet the minimum requirements of 8GB RAM.
|
||||
networks:
|
||||
ciscoasa_local:
|
||||
conpot_local_IEC104:
|
||||
conpot_local_ipmi:
|
||||
conpot_local_kamstrup_382:
|
||||
cowrie_local:
|
||||
dicompot_local:
|
||||
dionaea_local:
|
||||
elasticpot_local:
|
||||
h0neytr4p_local:
|
||||
heralding_local:
|
||||
honeyaml_local:
|
||||
ipphoney_local:
|
||||
log4pot_local:
|
||||
mailoney_local:
|
||||
medpot_local:
|
||||
miniprint_local:
|
||||
redishoneypot_local:
|
||||
sentrypeer_local:
|
||||
tanner_local:
|
||||
wordpot_local:
|
||||
ewsposter_local:
|
||||
|
||||
services:
|
||||
|
||||
#########################################
|
||||
#### DEV
|
||||
#########################################
|
||||
#### T-Pot Init - Never delete this!
|
||||
#########################################
|
||||
|
||||
# T-Pot Init Service
|
||||
tpotinit:
|
||||
container_name: tpotinit
|
||||
env_file:
|
||||
- .env
|
||||
restart: always
|
||||
stop_grace_period: 60s
|
||||
tmpfs:
|
||||
- /tmp/etc:uid=2000,gid=2000
|
||||
- /tmp/:uid=2000,gid=2000
|
||||
network_mode: "host"
|
||||
cap_add:
|
||||
- NET_ADMIN
|
||||
image: ${TPOT_REPO}/tpotinit:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
volumes:
|
||||
- ${TPOT_DOCKER_COMPOSE}:/tmp/tpot/docker-compose.yml:ro
|
||||
- ${TPOT_DATA_PATH}/blackhole:/etc/blackhole
|
||||
- ${TPOT_DATA_PATH}:/data
|
||||
- /var/run/docker.sock:/var/run/docker.sock:ro
|
||||
|
||||
##################
|
||||
#### Honeypots
|
||||
##################
|
||||
|
||||
# Ciscoasa service
|
||||
ciscoasa:
|
||||
container_name: ciscoasa
|
||||
restart: always
|
||||
depends_on:
|
||||
logstash:
|
||||
condition: service_healthy
|
||||
tmpfs:
|
||||
- /tmp/ciscoasa:uid=2000,gid=2000
|
||||
networks:
|
||||
- ciscoasa_local
|
||||
ports:
|
||||
- "5000:5000/udp"
|
||||
- "8443:8443"
|
||||
image: ${TPOT_REPO}/ciscoasa:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
read_only: true
|
||||
volumes:
|
||||
- ${TPOT_DATA_PATH}/ciscoasa/log:/var/log/ciscoasa
|
||||
|
||||
# Conpot IEC104 service
|
||||
conpot_IEC104:
|
||||
container_name: conpot_iec104
|
||||
restart: always
|
||||
depends_on:
|
||||
logstash:
|
||||
condition: service_healthy
|
||||
environment:
|
||||
- CONPOT_CONFIG=/etc/conpot/conpot.cfg
|
||||
- CONPOT_JSON_LOG=/var/log/conpot/conpot_IEC104.json
|
||||
- CONPOT_LOG=/var/log/conpot/conpot_IEC104.log
|
||||
- CONPOT_TEMPLATE=IEC104
|
||||
- CONPOT_TMP=/tmp/conpot
|
||||
tmpfs:
|
||||
- /tmp/conpot:uid=2000,gid=2000
|
||||
networks:
|
||||
- conpot_local_IEC104
|
||||
ports:
|
||||
- "161:161/udp"
|
||||
- "2404:2404"
|
||||
image: ${TPOT_REPO}/conpot:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
read_only: true
|
||||
volumes:
|
||||
- ${TPOT_DATA_PATH}/conpot/log:/var/log/conpot
|
||||
|
||||
# Conpot ipmi
|
||||
conpot_ipmi:
|
||||
container_name: conpot_ipmi
|
||||
restart: always
|
||||
depends_on:
|
||||
logstash:
|
||||
condition: service_healthy
|
||||
environment:
|
||||
- CONPOT_CONFIG=/etc/conpot/conpot.cfg
|
||||
- CONPOT_JSON_LOG=/var/log/conpot/conpot_ipmi.json
|
||||
- CONPOT_LOG=/var/log/conpot/conpot_ipmi.log
|
||||
- CONPOT_TEMPLATE=ipmi
|
||||
- CONPOT_TMP=/tmp/conpot
|
||||
tmpfs:
|
||||
- /tmp/conpot:uid=2000,gid=2000
|
||||
networks:
|
||||
- conpot_local_ipmi
|
||||
ports:
|
||||
- "623:623/udp"
|
||||
image: ${TPOT_REPO}/conpot:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
read_only: true
|
||||
volumes:
|
||||
- ${TPOT_DATA_PATH}/conpot/log:/var/log/conpot
|
||||
|
||||
# Conpot kamstrup_382
|
||||
conpot_kamstrup_382:
|
||||
container_name: conpot_kamstrup_382
|
||||
restart: always
|
||||
depends_on:
|
||||
logstash:
|
||||
condition: service_healthy
|
||||
environment:
|
||||
- CONPOT_CONFIG=/etc/conpot/conpot.cfg
|
||||
- CONPOT_JSON_LOG=/var/log/conpot/conpot_kamstrup_382.json
|
||||
- CONPOT_LOG=/var/log/conpot/conpot_kamstrup_382.log
|
||||
- CONPOT_TEMPLATE=kamstrup_382
|
||||
- CONPOT_TMP=/tmp/conpot
|
||||
tmpfs:
|
||||
- /tmp/conpot:uid=2000,gid=2000
|
||||
networks:
|
||||
- conpot_local_kamstrup_382
|
||||
ports:
|
||||
- "1025:1025"
|
||||
- "50100:50100"
|
||||
image: ${TPOT_REPO}/conpot:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
read_only: true
|
||||
volumes:
|
||||
- ${TPOT_DATA_PATH}/conpot/log:/var/log/conpot
|
||||
|
||||
# Cowrie service
|
||||
cowrie:
|
||||
container_name: cowrie
|
||||
restart: always
|
||||
depends_on:
|
||||
logstash:
|
||||
condition: service_healthy
|
||||
tmpfs:
|
||||
- /tmp/cowrie:uid=2000,gid=2000
|
||||
- /tmp/cowrie/data:uid=2000,gid=2000
|
||||
networks:
|
||||
- cowrie_local
|
||||
ports:
|
||||
- "22:22"
|
||||
- "23:23"
|
||||
image: ${TPOT_REPO}/cowrie:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
read_only: true
|
||||
volumes:
|
||||
- ${TPOT_DATA_PATH}/cowrie/downloads:/home/cowrie/cowrie/dl
|
||||
- ${TPOT_DATA_PATH}/cowrie/keys:/home/cowrie/cowrie/etc
|
||||
- ${TPOT_DATA_PATH}/cowrie/log:/home/cowrie/cowrie/log
|
||||
- ${TPOT_DATA_PATH}/cowrie/log/tty:/home/cowrie/cowrie/log/tty
|
||||
|
||||
# Dicompot service
|
||||
# Get the Horos Client for testing: https://horosproject.org/
|
||||
# Get Dicom images (CC BY 3.0): https://www.cancerimagingarchive.net/collections/
|
||||
# Put images (which must be in Dicom DCM format or it will not work!) into /data/dicompot/images
|
||||
dicompot:
|
||||
container_name: dicompot
|
||||
restart: always
|
||||
depends_on:
|
||||
logstash:
|
||||
condition: service_healthy
|
||||
networks:
|
||||
- dicompot_local
|
||||
ports:
|
||||
- "104:11112"
|
||||
- "11112:11112"
|
||||
image: ${TPOT_REPO}/dicompot:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
read_only: true
|
||||
volumes:
|
||||
- ${TPOT_DATA_PATH}/dicompot/log:/var/log/dicompot
|
||||
# - ${TPOT_DATA_PATH}/dicompot/images:/opt/dicompot/images
|
||||
|
||||

  # Dionaea service
  dionaea:
    container_name: dionaea
    stdin_open: true
    tty: true
    restart: always
    depends_on:
      logstash:
        condition: service_healthy
    networks:
      - dionaea_local
    ports:
      - "20:20"
      - "21:21"
      - "42:42"
      - "69:69/udp"
      - "81:81"
      - "135:135"
      # - "443:443"
      - "445:445"
      - "1433:1433"
      - "1723:1723"
      - "1883:1883"
      - "3306:3306"
      # - "5060:5060"
      # - "5060:5060/udp"
      # - "5061:5061"
      - "27017:27017"
    image: ${TPOT_REPO}/dionaea:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/dionaea/roots/ftp:/opt/dionaea/var/dionaea/roots/ftp
      - ${TPOT_DATA_PATH}/dionaea/roots/tftp:/opt/dionaea/var/dionaea/roots/tftp
      - ${TPOT_DATA_PATH}/dionaea/roots/www:/opt/dionaea/var/dionaea/roots/www
      - ${TPOT_DATA_PATH}/dionaea/roots/upnp:/opt/dionaea/var/dionaea/roots/upnp
      - ${TPOT_DATA_PATH}/dionaea:/opt/dionaea/var/dionaea
      - ${TPOT_DATA_PATH}/dionaea/binaries:/opt/dionaea/var/dionaea/binaries
      - ${TPOT_DATA_PATH}/dionaea/log:/opt/dionaea/var/log
      - ${TPOT_DATA_PATH}/dionaea/rtp:/opt/dionaea/var/dionaea/rtp

  # ElasticPot service
  elasticpot:
    container_name: elasticpot
    restart: always
    depends_on:
      logstash:
        condition: service_healthy
    networks:
      - elasticpot_local
    ports:
      - "9200:9200"
    image: ${TPOT_REPO}/elasticpot:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/elasticpot/log:/opt/elasticpot/log

  # H0neytr4p service
  h0neytr4p:
    container_name: h0neytr4p
    restart: always
    depends_on:
      logstash:
        condition: service_healthy
    networks:
      - h0neytr4p_local
    ports:
      - "443:443"
      # - "80:80"
    image: ${TPOT_REPO}/h0neytr4p:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/h0neytr4p/log/:/opt/h0neytr4p/log/
      - ${TPOT_DATA_PATH}/h0neytr4p/payloads/:/data/h0neytr4p/payloads/

  # Heralding service
  heralding:
    container_name: heralding
    restart: always
    depends_on:
      logstash:
        condition: service_healthy
    tmpfs:
      - /tmp/heralding:uid=2000,gid=2000
    networks:
      - heralding_local
    ports:
      # - "21:21"
      # - "22:22"
      # - "23:23"
      # - "25:25"
      # - "80:80"
      - "110:110"
      - "143:143"
      # - "443:443"
      - "465:465"
      - "993:993"
      - "995:995"
      # - "3306:3306"
      # - "3389:3389"
      - "1080:1080"
      - "5432:5432"
      - "5900:5900"
    image: ${TPOT_REPO}/heralding:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/heralding/log:/var/log/heralding

  # Honeyaml service
  honeyaml:
    container_name: honeyaml
    restart: always
    depends_on:
      logstash:
        condition: service_healthy
    networks:
      - honeyaml_local
    ports:
      - "3000:8080"
    image: ${TPOT_REPO}/honeyaml:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/honeyaml/log:/opt/honeyaml/log/

  # Honeytrap service
  honeytrap:
    container_name: honeytrap
    restart: always
    depends_on:
      logstash:
        condition: service_healthy
    tmpfs:
      - /tmp/honeytrap:uid=2000,gid=2000
    network_mode: "host"
    cap_add:
      - NET_ADMIN
    image: ${TPOT_REPO}/honeytrap:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/honeytrap/attacks:/opt/honeytrap/var/attacks
      - ${TPOT_DATA_PATH}/honeytrap/downloads:/opt/honeytrap/var/downloads
      - ${TPOT_DATA_PATH}/honeytrap/log:/opt/honeytrap/var/log

  # Ipphoney service
  ipphoney:
    container_name: ipphoney
    restart: always
    depends_on:
      logstash:
        condition: service_healthy
    networks:
      - ipphoney_local
    ports:
      - "631:631"
    image: ${TPOT_REPO}/ipphoney:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/ipphoney/log:/opt/ipphoney/log

  # Log4pot service
  log4pot:
    container_name: log4pot
    restart: always
    depends_on:
      logstash:
        condition: service_healthy
    tmpfs:
      - /tmp:uid=2000,gid=2000
    networks:
      - log4pot_local
    ports:
      # - "80:8080"
      # - "443:8080"
      # - "8080:8080"
      # - "9200:8080"
      - "25565:8080"
    image: ${TPOT_REPO}/log4pot:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/log4pot/log:/var/log/log4pot/log
      - ${TPOT_DATA_PATH}/log4pot/payloads:/var/log/log4pot/payloads

  # Mailoney service
  mailoney:
    container_name: mailoney
    stdin_open: true
    tty: true
    restart: always
    depends_on:
      logstash:
        condition: service_healthy
    networks:
      - mailoney_local
    ports:
      - "25:25"
      - "587:25"
    image: ${TPOT_REPO}/mailoney:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/mailoney/log:/opt/mailoney/logs

  # Medpot service
  medpot:
    container_name: medpot
    restart: always
    depends_on:
      logstash:
        condition: service_healthy
    networks:
      - medpot_local
    ports:
      - "2575:2575"
    image: ${TPOT_REPO}/medpot:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/medpot/log/:/var/log/medpot

  # Miniprint service
  miniprint:
    container_name: miniprint
    restart: always
    depends_on:
      logstash:
        condition: service_healthy
    networks:
      - miniprint_local
    ports:
      - "9100:9100"
    image: ${TPOT_REPO}/miniprint:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/miniprint/log/:/opt/miniprint/log/
      - ${TPOT_DATA_PATH}/miniprint/uploads/:/opt/miniprint/uploads/

  # Redishoneypot service
  redishoneypot:
    container_name: redishoneypot
    restart: always
    depends_on:
      logstash:
        condition: service_healthy
    networks:
      - redishoneypot_local
    ports:
      - "6379:6379"
    image: ${TPOT_REPO}/redishoneypot:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/redishoneypot/log:/var/log/redishoneypot

  # SentryPeer service
  sentrypeer:
    container_name: sentrypeer
    restart: always
    depends_on:
      logstash:
        condition: service_healthy
    # environment:
    #   - SENTRYPEER_PEER_TO_PEER=1
    networks:
      - sentrypeer_local
    ports:
      # - "4222:4222/udp"
      - "5060:5060/tcp"
      - "5060:5060/udp"
      # - "127.0.0.1:8082:8082"
    image: ${TPOT_REPO}/sentrypeer:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/sentrypeer/log:/var/log/sentrypeer

  #### Snare / Tanner
  ## Tanner Redis Service
  tanner_redis:
    container_name: tanner_redis
    restart: always
    depends_on:
      logstash:
        condition: service_healthy
    tty: true
    networks:
      - tanner_local
    image: ${TPOT_REPO}/redis:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true

  ## PHP Sandbox service
  tanner_phpox:
    container_name: tanner_phpox
    restart: always
    depends_on:
      logstash:
        condition: service_healthy
    tty: true
    networks:
      - tanner_local
    image: ${TPOT_REPO}/phpox:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true

  ## Tanner API Service
  tanner_api:
    container_name: tanner_api
    restart: always
    depends_on:
      - tanner_redis
    tmpfs:
      - /tmp/tanner:uid=2000,gid=2000
    tty: true
    networks:
      - tanner_local
    image: ${TPOT_REPO}/tanner:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/tanner/log:/var/log/tanner
    command: tannerapi

  ## Tanner Service
  tanner:
    container_name: tanner
    restart: always
    depends_on:
      - tanner_api
      - tanner_phpox
    tmpfs:
      - /tmp/tanner:uid=2000,gid=2000
    tty: true
    networks:
      - tanner_local
    image: ${TPOT_REPO}/tanner:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    command: tanner
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/tanner/log:/var/log/tanner
      - ${TPOT_DATA_PATH}/tanner/files:/opt/tanner/files

  ## Snare Service
  snare:
    container_name: snare
    restart: always
    depends_on:
      - tanner
    tty: true
    networks:
      - tanner_local
    ports:
      - "80:80"
    image: ${TPOT_REPO}/snare:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}

  # Wordpot service
  wordpot:
    container_name: wordpot
    restart: always
    depends_on:
      logstash:
        condition: service_healthy
    networks:
      - wordpot_local
    ports:
      - "8080:80"
    image: ${TPOT_REPO}/wordpot:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/wordpot/log:/opt/wordpot/logs/

  ##################
  #### Tools
  ##################

  #### ELK
  ## Elasticsearch service
  elasticsearch:
    container_name: elasticsearch
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    environment:
      - bootstrap.memory_lock=true
      - ES_JAVA_OPTS=-Xms2048m -Xmx2048m
      - ES_TMPDIR=/tmp
    cap_add:
      - IPC_LOCK
    ulimits:
      memlock:
        soft: -1
        hard: -1
      nofile:
        soft: 65536
        hard: 65536
    mem_limit: 4g
    ports:
      - "127.0.0.1:64298:9200"
    image: ${TPOT_REPO}/elasticsearch:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    volumes:
      - ${TPOT_DATA_PATH}:/data
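
A hedged sketch, not part of this diff: on hosts with RAM to spare, the 2 GB heap above can be raised through a hypothetical override file (example values; the usual guidance of keeping heap at roughly half the container memory limit is assumed), applied with docker compose -f <variant>.yml -f <override>.yml up -d:

services:
  elasticsearch:
    environment:
      - ES_JAVA_OPTS=-Xms4096m -Xmx4096m   # example: 4 GB heap
    mem_limit: 8g                          # example: double the default limit

Compose merges environment entries per variable, so only the changed values need to be stated.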

  ## Logstash service
  logstash:
    container_name: logstash
    restart: always
    depends_on:
      elasticsearch:
        condition: service_healthy
    environment:
      - LS_JAVA_OPTS=-Xms1024m -Xmx1024m
      - TPOT_TYPE=${TPOT_TYPE:-HIVE}
      - TPOT_HIVE_USER=${TPOT_HIVE_USER}
      - TPOT_HIVE_IP=${TPOT_HIVE_IP}
      - LS_SSL_VERIFICATION=${LS_SSL_VERIFICATION:-full}
    ports:
      - "127.0.0.1:64305:64305"
    mem_limit: 2g
    image: ${TPOT_REPO}/logstash:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    volumes:
      - ${TPOT_DATA_PATH}:/data
  #### /ELK

  # Ewsposter service
  ewsposter:
    container_name: ewsposter
    restart: always
    depends_on:
      logstash:
        condition: service_healthy
    networks:
      - ewsposter_local
    environment:
      - EWS_HPFEEDS_ENABLE=false
      - EWS_HPFEEDS_HOST=host
      - EWS_HPFEEDS_PORT=port
      - EWS_HPFEEDS_CHANNELS=channels
      - EWS_HPFEEDS_IDENT=user
      - EWS_HPFEEDS_SECRET=secret
      - EWS_HPFEEDS_TLSCERT=false
      - EWS_HPFEEDS_FORMAT=json
    image: ${TPOT_REPO}/ewsposter:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    volumes:
      - ${TPOT_DATA_PATH}:/data
      - ${TPOT_DATA_PATH}/ews/conf/ews.ip:/opt/ewsposter/ews.ip

compose/sensor.yml (new file, 711 lines)
@@ -0,0 +1,711 @@
# T-Pot: SENSOR
networks:
  adbhoney_local:
  ciscoasa_local:
  conpot_local_IEC104:
  conpot_local_guardian_ast:
  conpot_local_ipmi:
  conpot_local_kamstrup_382:
  cowrie_local:
  dicompot_local:
  dionaea_local:
  elasticpot_local:
  h0neytr4p_local:
  heralding_local:
  honeyaml_local:
  ipphoney_local:
  mailoney_local:
  medpot_local:
  miniprint_local:
  redishoneypot_local:
  sentrypeer_local:
  tanner_local:
  wordpot_local:
  ewsposter_local:

services:

  #########################################
  #### DEV
  #########################################
  #### T-Pot Init - Never delete this!
  #########################################

  # T-Pot Init Service
  tpotinit:
    container_name: tpotinit
    env_file:
      - .env
    restart: always
    stop_grace_period: 60s
    tmpfs:
      - /tmp/etc:uid=2000,gid=2000
      - /tmp/:uid=2000,gid=2000
    network_mode: "host"
    cap_add:
      - NET_ADMIN
    image: ${TPOT_REPO}/tpotinit:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    volumes:
      - ${TPOT_DOCKER_COMPOSE}:/tmp/tpot/docker-compose.yml:ro
      - ${TPOT_DATA_PATH}/blackhole:/etc/blackhole
      - ${TPOT_DATA_PATH}:/data
      - /var/run/docker.sock:/var/run/docker.sock:ro

  ##################
  #### Honeypots
  ##################

  # Adbhoney service
  adbhoney:
    container_name: adbhoney
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    networks:
      - adbhoney_local
    ports:
      - "5555:5555"
    image: ${TPOT_REPO}/adbhoney:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/adbhoney/log:/opt/adbhoney/log
      - ${TPOT_DATA_PATH}/adbhoney/downloads:/opt/adbhoney/dl

  # Ciscoasa service
  ciscoasa:
    container_name: ciscoasa
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    tmpfs:
      - /tmp/ciscoasa:uid=2000,gid=2000
    networks:
      - ciscoasa_local
    ports:
      - "5000:5000/udp"
      - "8443:8443"
    image: ${TPOT_REPO}/ciscoasa:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/ciscoasa/log:/var/log/ciscoasa

  # Conpot IEC104 service
  conpot_IEC104:
    container_name: conpot_iec104
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    environment:
      - CONPOT_CONFIG=/etc/conpot/conpot.cfg
      - CONPOT_JSON_LOG=/var/log/conpot/conpot_IEC104.json
      - CONPOT_LOG=/var/log/conpot/conpot_IEC104.log
      - CONPOT_TEMPLATE=IEC104
      - CONPOT_TMP=/tmp/conpot
    tmpfs:
      - /tmp/conpot:uid=2000,gid=2000
    networks:
      - conpot_local_IEC104
    ports:
      - "161:161/udp"
      - "2404:2404"
    image: ${TPOT_REPO}/conpot:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/conpot/log:/var/log/conpot

  # Conpot guardian_ast service
  conpot_guardian_ast:
    container_name: conpot_guardian_ast
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    environment:
      - CONPOT_CONFIG=/etc/conpot/conpot.cfg
      - CONPOT_JSON_LOG=/var/log/conpot/conpot_guardian_ast.json
      - CONPOT_LOG=/var/log/conpot/conpot_guardian_ast.log
      - CONPOT_TEMPLATE=guardian_ast
      - CONPOT_TMP=/tmp/conpot
    tmpfs:
      - /tmp/conpot:uid=2000,gid=2000
    networks:
      - conpot_local_guardian_ast
    ports:
      - "10001:10001"
    image: ${TPOT_REPO}/conpot:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/conpot/log:/var/log/conpot

  # Conpot ipmi
  conpot_ipmi:
    container_name: conpot_ipmi
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    environment:
      - CONPOT_CONFIG=/etc/conpot/conpot.cfg
      - CONPOT_JSON_LOG=/var/log/conpot/conpot_ipmi.json
      - CONPOT_LOG=/var/log/conpot/conpot_ipmi.log
      - CONPOT_TEMPLATE=ipmi
      - CONPOT_TMP=/tmp/conpot
    tmpfs:
      - /tmp/conpot:uid=2000,gid=2000
    networks:
      - conpot_local_ipmi
    ports:
      - "623:623/udp"
    image: ${TPOT_REPO}/conpot:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/conpot/log:/var/log/conpot

  # Conpot kamstrup_382
  conpot_kamstrup_382:
    container_name: conpot_kamstrup_382
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    environment:
      - CONPOT_CONFIG=/etc/conpot/conpot.cfg
      - CONPOT_JSON_LOG=/var/log/conpot/conpot_kamstrup_382.json
      - CONPOT_LOG=/var/log/conpot/conpot_kamstrup_382.log
      - CONPOT_TEMPLATE=kamstrup_382
      - CONPOT_TMP=/tmp/conpot
    tmpfs:
      - /tmp/conpot:uid=2000,gid=2000
    networks:
      - conpot_local_kamstrup_382
    ports:
      - "1025:1025"
      - "50100:50100"
    image: ${TPOT_REPO}/conpot:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/conpot/log:/var/log/conpot

  # Cowrie service
  cowrie:
    container_name: cowrie
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    tmpfs:
      - /tmp/cowrie:uid=2000,gid=2000
      - /tmp/cowrie/data:uid=2000,gid=2000
    networks:
      - cowrie_local
    ports:
      - "22:22"
      - "23:23"
    image: ${TPOT_REPO}/cowrie:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/cowrie/downloads:/home/cowrie/cowrie/dl
      - ${TPOT_DATA_PATH}/cowrie/keys:/home/cowrie/cowrie/etc
      - ${TPOT_DATA_PATH}/cowrie/log:/home/cowrie/cowrie/log
      - ${TPOT_DATA_PATH}/cowrie/log/tty:/home/cowrie/cowrie/log/tty

  # Dicompot service
  # Get the Horos Client for testing: https://horosproject.org/
  # Get Dicom images (CC BY 3.0): https://www.cancerimagingarchive.net/collections/
  # Put images (which must be in Dicom DCM format or it will not work!) into /data/dicompot/images
  dicompot:
    container_name: dicompot
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    networks:
      - dicompot_local
    ports:
      - "104:11112"
      - "11112:11112"
    image: ${TPOT_REPO}/dicompot:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/dicompot/log:/var/log/dicompot
      # - ${TPOT_DATA_PATH}/dicompot/images:/opt/dicompot/images

  # Dionaea service
  dionaea:
    container_name: dionaea
    stdin_open: true
    tty: true
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    networks:
      - dionaea_local
    ports:
      - "20:20"
      - "21:21"
      - "42:42"
      - "69:69/udp"
      - "81:81"
      - "135:135"
      # - "443:443"
      - "445:445"
      - "1433:1433"
      - "1723:1723"
      - "1883:1883"
      - "3306:3306"
      # - "5060:5060"
      # - "5060:5060/udp"
      # - "5061:5061"
      - "27017:27017"
    image: ${TPOT_REPO}/dionaea:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/dionaea/roots/ftp:/opt/dionaea/var/dionaea/roots/ftp
      - ${TPOT_DATA_PATH}/dionaea/roots/tftp:/opt/dionaea/var/dionaea/roots/tftp
      - ${TPOT_DATA_PATH}/dionaea/roots/www:/opt/dionaea/var/dionaea/roots/www
      - ${TPOT_DATA_PATH}/dionaea/roots/upnp:/opt/dionaea/var/dionaea/roots/upnp
      - ${TPOT_DATA_PATH}/dionaea:/opt/dionaea/var/dionaea
      - ${TPOT_DATA_PATH}/dionaea/binaries:/opt/dionaea/var/dionaea/binaries
      - ${TPOT_DATA_PATH}/dionaea/log:/opt/dionaea/var/log
      - ${TPOT_DATA_PATH}/dionaea/rtp:/opt/dionaea/var/dionaea/rtp

  # ElasticPot service
  elasticpot:
    container_name: elasticpot
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    networks:
      - elasticpot_local
    ports:
      - "9200:9200"
    image: ${TPOT_REPO}/elasticpot:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/elasticpot/log:/opt/elasticpot/log

  # H0neytr4p service
  h0neytr4p:
    container_name: h0neytr4p
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    networks:
      - h0neytr4p_local
    ports:
      - "443:443"
      # - "80:80"
    image: ${TPOT_REPO}/h0neytr4p:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/h0neytr4p/log/:/opt/h0neytr4p/log/
      - ${TPOT_DATA_PATH}/h0neytr4p/payloads/:/data/h0neytr4p/payloads/

  # Heralding service
  heralding:
    container_name: heralding
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    tmpfs:
      - /tmp/heralding:uid=2000,gid=2000
    networks:
      - heralding_local
    ports:
      # - "21:21"
      # - "22:22"
      # - "23:23"
      # - "25:25"
      # - "80:80"
      - "110:110"
      - "143:143"
      # - "443:443"
      - "465:465"
      - "993:993"
      - "995:995"
      # - "3306:3306"
      # - "3389:3389"
      - "1080:1080"
      - "5432:5432"
      - "5900:5900"
    image: ${TPOT_REPO}/heralding:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/heralding/log:/var/log/heralding

  # Honeyaml service
  honeyaml:
    container_name: honeyaml
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    networks:
      - honeyaml_local
    ports:
      - "3000:8080"
    image: ${TPOT_REPO}/honeyaml:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/honeyaml/log:/opt/honeyaml/log/

  # Honeytrap service
  honeytrap:
    container_name: honeytrap
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    tmpfs:
      - /tmp/honeytrap:uid=2000,gid=2000
    network_mode: "host"
    cap_add:
      - NET_ADMIN
    image: ${TPOT_REPO}/honeytrap:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/honeytrap/attacks:/opt/honeytrap/var/attacks
      - ${TPOT_DATA_PATH}/honeytrap/downloads:/opt/honeytrap/var/downloads
      - ${TPOT_DATA_PATH}/honeytrap/log:/opt/honeytrap/var/log

  # Ipphoney service
  ipphoney:
    container_name: ipphoney
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    networks:
      - ipphoney_local
    ports:
      - "631:631"
    image: ${TPOT_REPO}/ipphoney:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/ipphoney/log:/opt/ipphoney/log

  # Mailoney service
  mailoney:
    container_name: mailoney
    stdin_open: true
    tty: true
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    networks:
      - mailoney_local
    ports:
      - "25:25"
      - "587:25"
    image: ${TPOT_REPO}/mailoney:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/mailoney/log:/opt/mailoney/logs

  # Medpot service
  medpot:
    container_name: medpot
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    networks:
      - medpot_local
    ports:
      - "2575:2575"
    image: ${TPOT_REPO}/medpot:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/medpot/log/:/var/log/medpot

  # Miniprint service
  miniprint:
    container_name: miniprint
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    networks:
      - miniprint_local
    ports:
      - "9100:9100"
    image: ${TPOT_REPO}/miniprint:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/miniprint/log/:/opt/miniprint/log/
      - ${TPOT_DATA_PATH}/miniprint/uploads/:/opt/miniprint/uploads/

  # Redishoneypot service
  redishoneypot:
    container_name: redishoneypot
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    networks:
      - redishoneypot_local
    ports:
      - "6379:6379"
    image: ${TPOT_REPO}/redishoneypot:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/redishoneypot/log:/var/log/redishoneypot

  # SentryPeer service
  sentrypeer:
    container_name: sentrypeer
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    # environment:
    #   - SENTRYPEER_PEER_TO_PEER=1
    networks:
      - sentrypeer_local
    ports:
      # - "4222:4222/udp"
      - "5060:5060/tcp"
      - "5060:5060/udp"
      # - "127.0.0.1:8082:8082"
    image: ${TPOT_REPO}/sentrypeer:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/sentrypeer/log:/var/log/sentrypeer

  #### Snare / Tanner
  ## Tanner Redis Service
  tanner_redis:
    container_name: tanner_redis
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    tty: true
    networks:
      - tanner_local
    image: ${TPOT_REPO}/redis:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true

  ## PHP Sandbox service
  tanner_phpox:
    container_name: tanner_phpox
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    tty: true
    networks:
      - tanner_local
    image: ${TPOT_REPO}/phpox:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true

  ## Tanner API Service
  tanner_api:
    container_name: tanner_api
    restart: always
    depends_on:
      - tanner_redis
    tmpfs:
      - /tmp/tanner:uid=2000,gid=2000
    tty: true
    networks:
      - tanner_local
    image: ${TPOT_REPO}/tanner:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/tanner/log:/var/log/tanner
    command: tannerapi

  ## Tanner Service
  tanner:
    container_name: tanner
    restart: always
    depends_on:
      - tanner_api
      - tanner_phpox
    tmpfs:
      - /tmp/tanner:uid=2000,gid=2000
    tty: true
    networks:
      - tanner_local
    image: ${TPOT_REPO}/tanner:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    command: tanner
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/tanner/log:/var/log/tanner
      - ${TPOT_DATA_PATH}/tanner/files:/opt/tanner/files

  ## Snare Service
  snare:
    container_name: snare
    restart: always
    depends_on:
      - tanner
    tty: true
    networks:
      - tanner_local
    ports:
      - "80:80"
    image: ${TPOT_REPO}/snare:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}

  # Wordpot service
  wordpot:
    container_name: wordpot
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    networks:
      - wordpot_local
    ports:
      - "8080:80"
    image: ${TPOT_REPO}/wordpot:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/wordpot/log:/opt/wordpot/logs/

  ##################
  #### NSM
  ##################

  # Fatt service
  fatt:
    container_name: fatt
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    network_mode: "host"
    cap_add:
      - NET_ADMIN
      - SYS_NICE
      - NET_RAW
    image: ${TPOT_REPO}/fatt:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    volumes:
      - ${TPOT_DATA_PATH}/fatt/log:/opt/fatt/log

  # P0f service
  p0f:
    container_name: p0f
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    network_mode: "host"
    image: ${TPOT_REPO}/p0f:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/p0f/log:/var/log/p0f

  # Suricata service
  suricata:
    container_name: suricata
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    environment:
      - OINKCODE=${OINKCODE:-OPEN} # Default to OPEN if unset or NULL (value provided by T-Pot .env)
      # Loading external Rules from URL
      # - FROMURL="https://username:password@yoururl.com|https://username:password@otherurl.com"
    network_mode: "host"
    cap_add:
      - NET_ADMIN
      - SYS_NICE
      - NET_RAW
    image: ${TPOT_REPO}/suricata:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    volumes:
      - ${TPOT_DATA_PATH}/suricata/log:/var/log/suricata

  ##################
  #### Tools
  ##################

  #### ELK

  ## Logstash service
  logstash:
    container_name: logstash
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    environment:
      - LS_JAVA_OPTS=-Xms1024m -Xmx1024m
      - TPOT_TYPE=${TPOT_TYPE:-HIVE}
      - TPOT_HIVE_USER=${TPOT_HIVE_USER}
      - TPOT_HIVE_IP=${TPOT_HIVE_IP}
      - LS_SSL_VERIFICATION=${LS_SSL_VERIFICATION:-full}
    ports:
      - "127.0.0.1:64305:64305"
    mem_limit: 2g
    image: ${TPOT_REPO}/logstash:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    volumes:
      - ${TPOT_DATA_PATH}:/data
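
A sketch of the distributed case, with placeholder values rather than shipped defaults: on a sensor, the variables this logstash block consumes are set (normally via T-Pot's .env) so events are forwarded to a hive. Expressed as a compose override it would look like the following; the base64 encoding of TPOT_HIVE_USER is an assumption taken from T-Pot's documented sensor setup:

services:
  logstash:
    environment:
      - TPOT_TYPE=SENSOR
      - TPOT_HIVE_USER=dXNlcjpwYXNz   # assumed: base64 of 'user:pass', a web user on the hive
      - TPOT_HIVE_IP=203.0.113.10     # example hive address
      - LS_SSL_VERIFICATION=none      # only if the hive presents a self-signed certificate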

  #### /ELK

  # Ewsposter service
  ewsposter:
    container_name: ewsposter
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    networks:
      - ewsposter_local
    environment:
      - EWS_HPFEEDS_ENABLE=false
      - EWS_HPFEEDS_HOST=host
      - EWS_HPFEEDS_PORT=port
      - EWS_HPFEEDS_CHANNELS=channels
      - EWS_HPFEEDS_IDENT=user
      - EWS_HPFEEDS_SECRET=secret
      - EWS_HPFEEDS_TLSCERT=false
      - EWS_HPFEEDS_FORMAT=json
    image: ${TPOT_REPO}/ewsposter:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    volumes:
      - ${TPOT_DATA_PATH}:/data
      - ${TPOT_DATA_PATH}/ews/conf/ews.ip:/opt/ewsposter/ews.ip

compose/standard.yml (new file, 856 lines)
@@ -0,0 +1,856 @@
# T-Pot: STANDARD
networks:
  adbhoney_local:
  ciscoasa_local:
  conpot_local_IEC104:
  conpot_local_guardian_ast:
  conpot_local_ipmi:
  conpot_local_kamstrup_382:
  cowrie_local:
  dicompot_local:
  dionaea_local:
  elasticpot_local:
  h0neytr4p_local:
  heralding_local:
  honeyaml_local:
  ipphoney_local:
  mailoney_local:
  medpot_local:
  miniprint_local:
  redishoneypot_local:
  sentrypeer_local:
  tanner_local:
  wordpot_local:
  nginx_local:
  ewsposter_local:

services:

  #########################################
  #### DEV
  #########################################
  #### T-Pot Init - Never delete this!
  #########################################

  # T-Pot Init Service
  tpotinit:
    container_name: tpotinit
    env_file:
      - .env
    restart: always
    stop_grace_period: 60s
    tmpfs:
      - /tmp/etc:uid=2000,gid=2000
      - /tmp/:uid=2000,gid=2000
    network_mode: "host"
    cap_add:
      - NET_ADMIN
    image: ${TPOT_REPO}/tpotinit:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    volumes:
      - ${TPOT_DOCKER_COMPOSE}:/tmp/tpot/docker-compose.yml:ro
      - ${TPOT_DATA_PATH}/blackhole:/etc/blackhole
      - ${TPOT_DATA_PATH}:/data
      - /var/run/docker.sock:/var/run/docker.sock:ro

  ##################
  #### Honeypots
  ##################

  # Adbhoney service
  adbhoney:
    container_name: adbhoney
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    networks:
      - adbhoney_local
    ports:
      - "5555:5555"
    image: ${TPOT_REPO}/adbhoney:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/adbhoney/log:/opt/adbhoney/log
      - ${TPOT_DATA_PATH}/adbhoney/downloads:/opt/adbhoney/dl

  # Ciscoasa service
  ciscoasa:
    container_name: ciscoasa
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    tmpfs:
      - /tmp/ciscoasa:uid=2000,gid=2000
    networks:
      - ciscoasa_local
    ports:
      - "5000:5000/udp"
      - "8443:8443"
    image: ${TPOT_REPO}/ciscoasa:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/ciscoasa/log:/var/log/ciscoasa

  # Conpot IEC104 service
  conpot_IEC104:
    container_name: conpot_iec104
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    environment:
      - CONPOT_CONFIG=/etc/conpot/conpot.cfg
      - CONPOT_JSON_LOG=/var/log/conpot/conpot_IEC104.json
      - CONPOT_LOG=/var/log/conpot/conpot_IEC104.log
      - CONPOT_TEMPLATE=IEC104
      - CONPOT_TMP=/tmp/conpot
    tmpfs:
      - /tmp/conpot:uid=2000,gid=2000
    networks:
      - conpot_local_IEC104
    ports:
      - "161:161/udp"
      - "2404:2404"
    image: ${TPOT_REPO}/conpot:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/conpot/log:/var/log/conpot

  # Conpot guardian_ast service
  conpot_guardian_ast:
    container_name: conpot_guardian_ast
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    environment:
      - CONPOT_CONFIG=/etc/conpot/conpot.cfg
      - CONPOT_JSON_LOG=/var/log/conpot/conpot_guardian_ast.json
      - CONPOT_LOG=/var/log/conpot/conpot_guardian_ast.log
      - CONPOT_TEMPLATE=guardian_ast
      - CONPOT_TMP=/tmp/conpot
    tmpfs:
      - /tmp/conpot:uid=2000,gid=2000
    networks:
      - conpot_local_guardian_ast
    ports:
      - "10001:10001"
    image: ${TPOT_REPO}/conpot:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/conpot/log:/var/log/conpot

  # Conpot ipmi
  conpot_ipmi:
    container_name: conpot_ipmi
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    environment:
      - CONPOT_CONFIG=/etc/conpot/conpot.cfg
      - CONPOT_JSON_LOG=/var/log/conpot/conpot_ipmi.json
      - CONPOT_LOG=/var/log/conpot/conpot_ipmi.log
      - CONPOT_TEMPLATE=ipmi
      - CONPOT_TMP=/tmp/conpot
    tmpfs:
      - /tmp/conpot:uid=2000,gid=2000
    networks:
      - conpot_local_ipmi
    ports:
      - "623:623/udp"
    image: ${TPOT_REPO}/conpot:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/conpot/log:/var/log/conpot

  # Conpot kamstrup_382
  conpot_kamstrup_382:
    container_name: conpot_kamstrup_382
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    environment:
      - CONPOT_CONFIG=/etc/conpot/conpot.cfg
      - CONPOT_JSON_LOG=/var/log/conpot/conpot_kamstrup_382.json
      - CONPOT_LOG=/var/log/conpot/conpot_kamstrup_382.log
      - CONPOT_TEMPLATE=kamstrup_382
      - CONPOT_TMP=/tmp/conpot
    tmpfs:
      - /tmp/conpot:uid=2000,gid=2000
    networks:
      - conpot_local_kamstrup_382
    ports:
      - "1025:1025"
      - "50100:50100"
    image: ${TPOT_REPO}/conpot:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/conpot/log:/var/log/conpot

  # Cowrie service
  cowrie:
    container_name: cowrie
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    tmpfs:
      - /tmp/cowrie:uid=2000,gid=2000
      - /tmp/cowrie/data:uid=2000,gid=2000
    networks:
      - cowrie_local
    ports:
      - "22:22"
      - "23:23"
    image: ${TPOT_REPO}/cowrie:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/cowrie/downloads:/home/cowrie/cowrie/dl
      - ${TPOT_DATA_PATH}/cowrie/keys:/home/cowrie/cowrie/etc
      - ${TPOT_DATA_PATH}/cowrie/log:/home/cowrie/cowrie/log
      - ${TPOT_DATA_PATH}/cowrie/log/tty:/home/cowrie/cowrie/log/tty

  # Dicompot service
  # Get the Horos Client for testing: https://horosproject.org/
  # Get Dicom images (CC BY 3.0): https://www.cancerimagingarchive.net/collections/
  # Put images (which must be in Dicom DCM format or it will not work!) into /data/dicompot/images
  dicompot:
    container_name: dicompot
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    networks:
      - dicompot_local
    ports:
      - "104:11112"
      - "11112:11112"
    image: ${TPOT_REPO}/dicompot:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/dicompot/log:/var/log/dicompot
      # - ${TPOT_DATA_PATH}/dicompot/images:/opt/dicompot/images

  # Dionaea service
  dionaea:
    container_name: dionaea
    stdin_open: true
    tty: true
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    networks:
      - dionaea_local
    ports:
      - "20:20"
      - "21:21"
      - "42:42"
      - "69:69/udp"
      - "81:81"
      - "135:135"
      # - "443:443"
      - "445:445"
      - "1433:1433"
      - "1723:1723"
      - "1883:1883"
      - "3306:3306"
      # - "5060:5060"
      # - "5060:5060/udp"
      # - "5061:5061"
      - "27017:27017"
    image: ${TPOT_REPO}/dionaea:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/dionaea/roots/ftp:/opt/dionaea/var/dionaea/roots/ftp
      - ${TPOT_DATA_PATH}/dionaea/roots/tftp:/opt/dionaea/var/dionaea/roots/tftp
      - ${TPOT_DATA_PATH}/dionaea/roots/www:/opt/dionaea/var/dionaea/roots/www
      - ${TPOT_DATA_PATH}/dionaea/roots/upnp:/opt/dionaea/var/dionaea/roots/upnp
      - ${TPOT_DATA_PATH}/dionaea:/opt/dionaea/var/dionaea
      - ${TPOT_DATA_PATH}/dionaea/binaries:/opt/dionaea/var/dionaea/binaries
      - ${TPOT_DATA_PATH}/dionaea/log:/opt/dionaea/var/log
      - ${TPOT_DATA_PATH}/dionaea/rtp:/opt/dionaea/var/dionaea/rtp

  # ElasticPot service
  elasticpot:
    container_name: elasticpot
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    networks:
      - elasticpot_local
    ports:
      - "9200:9200"
    image: ${TPOT_REPO}/elasticpot:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/elasticpot/log:/opt/elasticpot/log

  # H0neytr4p service
  h0neytr4p:
    container_name: h0neytr4p
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    networks:
      - h0neytr4p_local
    ports:
      - "443:443"
      # - "80:80"
    image: ${TPOT_REPO}/h0neytr4p:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/h0neytr4p/log/:/opt/h0neytr4p/log/
      - ${TPOT_DATA_PATH}/h0neytr4p/payloads/:/data/h0neytr4p/payloads/

  # Heralding service
  heralding:
    container_name: heralding
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    tmpfs:
      - /tmp/heralding:uid=2000,gid=2000
    networks:
      - heralding_local
    ports:
      # - "21:21"
      # - "22:22"
      # - "23:23"
      # - "25:25"
      # - "80:80"
      - "110:110"
      - "143:143"
      # - "443:443"
      - "465:465"
      - "993:993"
      - "995:995"
      # - "3306:3306"
      # - "3389:3389"
      - "1080:1080"
      - "5432:5432"
      - "5900:5900"
    image: ${TPOT_REPO}/heralding:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/heralding/log:/var/log/heralding

  # Honeyaml service
  honeyaml:
    container_name: honeyaml
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    networks:
      - honeyaml_local
    ports:
      - "3000:8080"
    image: ${TPOT_REPO}/honeyaml:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/honeyaml/log:/opt/honeyaml/log/

  # Honeytrap service
  honeytrap:
    container_name: honeytrap
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    tmpfs:
      - /tmp/honeytrap:uid=2000,gid=2000
    network_mode: "host"
    cap_add:
      - NET_ADMIN
    image: ${TPOT_REPO}/honeytrap:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/honeytrap/attacks:/opt/honeytrap/var/attacks
      - ${TPOT_DATA_PATH}/honeytrap/downloads:/opt/honeytrap/var/downloads
      - ${TPOT_DATA_PATH}/honeytrap/log:/opt/honeytrap/var/log

  # Ipphoney service
  ipphoney:
    container_name: ipphoney
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    networks:
      - ipphoney_local
    ports:
      - "631:631"
    image: ${TPOT_REPO}/ipphoney:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/ipphoney/log:/opt/ipphoney/log

  # Mailoney service
  mailoney:
    container_name: mailoney
    stdin_open: true
    tty: true
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    networks:
      - mailoney_local
    ports:
      - "25:25"
      - "587:25"
    image: ${TPOT_REPO}/mailoney:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/mailoney/log:/opt/mailoney/logs

  # Medpot service
  medpot:
    container_name: medpot
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    networks:
      - medpot_local
    ports:
      - "2575:2575"
    image: ${TPOT_REPO}/medpot:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/medpot/log/:/var/log/medpot

  # Miniprint service
  miniprint:
    container_name: miniprint
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    networks:
      - miniprint_local
    ports:
      - "9100:9100"
    image: ${TPOT_REPO}/miniprint:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/miniprint/log/:/opt/miniprint/log/
      - ${TPOT_DATA_PATH}/miniprint/uploads/:/opt/miniprint/uploads/

  # Redishoneypot service
  redishoneypot:
    container_name: redishoneypot
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    networks:
      - redishoneypot_local
    ports:
      - "6379:6379"
    image: ${TPOT_REPO}/redishoneypot:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/redishoneypot/log:/var/log/redishoneypot

  # SentryPeer service
  sentrypeer:
    container_name: sentrypeer
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    # environment:
    #   - SENTRYPEER_PEER_TO_PEER=1
    networks:
      - sentrypeer_local
    ports:
      # - "4222:4222/udp"
      - "5060:5060/tcp"
      - "5060:5060/udp"
      # - "127.0.0.1:8082:8082"
    image: ${TPOT_REPO}/sentrypeer:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/sentrypeer/log:/var/log/sentrypeer

  #### Snare / Tanner
  ## Tanner Redis Service
  tanner_redis:
    container_name: tanner_redis
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    tty: true
    networks:
      - tanner_local
    image: ${TPOT_REPO}/redis:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true

  ## PHP Sandbox service
  tanner_phpox:
    container_name: tanner_phpox
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    tty: true
    networks:
      - tanner_local
    image: ${TPOT_REPO}/phpox:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true

  ## Tanner API Service
  tanner_api:
    container_name: tanner_api
    restart: always
    depends_on:
      - tanner_redis
    tmpfs:
      - /tmp/tanner:uid=2000,gid=2000
    tty: true
    networks:
      - tanner_local
    image: ${TPOT_REPO}/tanner:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/tanner/log:/var/log/tanner
    command: tannerapi

  ## Tanner Service
  tanner:
    container_name: tanner
    restart: always
    depends_on:
      - tanner_api
      - tanner_phpox
    tmpfs:
      - /tmp/tanner:uid=2000,gid=2000
    tty: true
    networks:
      - tanner_local
    image: ${TPOT_REPO}/tanner:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    command: tanner
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/tanner/log:/var/log/tanner
      - ${TPOT_DATA_PATH}/tanner/files:/opt/tanner/files

  ## Snare Service
  snare:
    container_name: snare
    restart: always
    depends_on:
      - tanner
    tty: true
    networks:
      - tanner_local
    ports:
      - "80:80"
    image: ${TPOT_REPO}/snare:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}

  # Wordpot service
  wordpot:
    container_name: wordpot
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    networks:
      - wordpot_local
    ports:
      - "8080:80"
    image: ${TPOT_REPO}/wordpot:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/wordpot/log:/opt/wordpot/logs/

  ##################
  #### NSM
  ##################

  # Fatt service
  fatt:
    container_name: fatt
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    network_mode: "host"
    cap_add:
      - NET_ADMIN
      - SYS_NICE
      - NET_RAW
    image: ${TPOT_REPO}/fatt:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    volumes:
      - ${TPOT_DATA_PATH}/fatt/log:/opt/fatt/log

  # P0f service
  p0f:
    container_name: p0f
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    network_mode: "host"
    image: ${TPOT_REPO}/p0f:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/p0f/log:/var/log/p0f

  # Suricata service
  suricata:
    container_name: suricata
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    environment:
      - OINKCODE=${OINKCODE:-OPEN} # Default to OPEN if unset or NULL (value provided by T-Pot .env)
      # Loading external Rules from URL
      # - FROMURL="https://username:password@yoururl.com|https://username:password@otherurl.com"
    network_mode: "host"
    cap_add:
      - NET_ADMIN
      - SYS_NICE
      - NET_RAW
    image: ${TPOT_REPO}/suricata:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    volumes:
      - ${TPOT_DATA_PATH}/suricata/log:/var/log/suricata

  ##################
  #### Tools
  ##################

  #### ELK
  ## Elasticsearch service
  elasticsearch:
    container_name: elasticsearch
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    networks:
      - nginx_local
    environment:
      - bootstrap.memory_lock=true
      - ES_JAVA_OPTS=-Xms2048m -Xmx2048m
      - ES_TMPDIR=/tmp
    cap_add:
      - IPC_LOCK
    ulimits:
      memlock:
        soft: -1
        hard: -1
      nofile:
        soft: 65536
        hard: 65536
    mem_limit: 4g
    ports:
      - "127.0.0.1:64298:9200"
    image: ${TPOT_REPO}/elasticsearch:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    volumes:
      - ${TPOT_DATA_PATH}:/data

  ## Kibana service
  kibana:
    container_name: kibana
    restart: always
    depends_on:
      elasticsearch:
        condition: service_healthy
    networks:
      - nginx_local
    mem_limit: 1g
    ports:
      - "127.0.0.1:64296:5601"
    image: ${TPOT_REPO}/kibana:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}

  ## Logstash service
  logstash:
    container_name: logstash
    restart: always
    depends_on:
      elasticsearch:
        condition: service_healthy
    networks:
      - nginx_local
    environment:
      - LS_JAVA_OPTS=-Xms1024m -Xmx1024m
      - TPOT_TYPE=${TPOT_TYPE:-HIVE}
      - TPOT_HIVE_USER=${TPOT_HIVE_USER}
      - TPOT_HIVE_IP=${TPOT_HIVE_IP}
      - LS_SSL_VERIFICATION=${LS_SSL_VERIFICATION:-full}
    ports:
      - "127.0.0.1:64305:64305"
    mem_limit: 2g
    image: ${TPOT_REPO}/logstash:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    volumes:
      - ${TPOT_DATA_PATH}:/data

  ## Map Redis Service
  map_redis:
    container_name: map_redis
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    networks:
      - nginx_local
    stop_signal: SIGKILL
    tty: true
    image: ${TPOT_REPO}/redis:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true

  ## Map Web Service
  map_web:
    container_name: map_web
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    networks:
      - nginx_local
    environment:
      - MAP_COMMAND=AttackMapServer.py
    stop_signal: SIGKILL
    tty: true
    ports:
      - "127.0.0.1:64299:64299"
    image: ${TPOT_REPO}/map:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}

  ## Map Data Service
  map_data:
    container_name: map_data
    restart: always
    depends_on:
      elasticsearch:
        condition: service_healthy
    networks:
      - nginx_local
    environment:
      - MAP_COMMAND=DataServer_v2.py
      - TPOT_ATTACKMAP_TEXT=${TPOT_ATTACKMAP_TEXT}
      - TZ=${TPOT_ATTACKMAP_TEXT_TIMEZONE}
    stop_signal: SIGKILL
    tty: true
    image: ${TPOT_REPO}/map:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
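
For illustration, not shipped config: both attack-map variables above normally come from T-Pot's .env, but they can also be pinned explicitly through a hypothetical override file (example values):

services:
  map_data:
    environment:
      - TPOT_ATTACKMAP_TEXT=Lab Honeypots   # example caption shown on the map
      - TZ=Europe/Berlin                    # example timezone for the caption's clock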
  #### /ELK

  # Ewsposter service
  ewsposter:
    container_name: ewsposter
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    networks:
      - ewsposter_local
    environment:
      - EWS_HPFEEDS_ENABLE=false
      - EWS_HPFEEDS_HOST=host
      - EWS_HPFEEDS_PORT=port
      - EWS_HPFEEDS_CHANNELS=channels
      - EWS_HPFEEDS_IDENT=user
      - EWS_HPFEEDS_SECRET=secret
      - EWS_HPFEEDS_TLSCERT=false
      - EWS_HPFEEDS_FORMAT=json
    image: ${TPOT_REPO}/ewsposter:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    volumes:
      - ${TPOT_DATA_PATH}:/data
      - ${TPOT_DATA_PATH}/ews/conf/ews.ip:/opt/ewsposter/ews.ip

  # Nginx service
  nginx:
    container_name: nginx
    restart: always
    environment:
      - TPOT_OSTYPE=${TPOT_OSTYPE}
    depends_on:
      tpotinit:
        condition: service_healthy
    tmpfs:
      - /var/tmp/nginx/client_body
      - /var/tmp/nginx/proxy
      - /var/tmp/nginx/fastcgi
      - /var/tmp/nginx/uwsgi
      - /var/tmp/nginx/scgi
      - /run
      - /var/lib/nginx/tmp:uid=100,gid=82
    networks:
      - nginx_local
    ports:
      - "64297:64297"
      - "64294:64294"
    image: ${TPOT_REPO}/nginx:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/nginx/cert/:/etc/nginx/cert/:ro
      - ${TPOT_DATA_PATH}/nginx/conf/nginxpasswd:/etc/nginx/nginxpasswd:ro
      - ${TPOT_DATA_PATH}/nginx/conf/lswebpasswd:/etc/nginx/lswebpasswd:ro
      - ${TPOT_DATA_PATH}/nginx/log/:/var/log/nginx/

  # Spiderfoot service
  spiderfoot:
    container_name: spiderfoot
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    networks:
      - nginx_local
    ports:
      - "127.0.0.1:64303:8080"
    image: ${TPOT_REPO}/spiderfoot:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    volumes:
      - ${TPOT_DATA_PATH}/spiderfoot:/home/spiderfoot/.spiderfoot

compose/tarpit.yml (new file, 406 lines)
@@ -0,0 +1,406 @@
# T-Pot: TARPIT
networks:
  ddospot_local:
  endlessh_local:
  go-pot_local:
  hellpot_local:
  heralding_local:
  nginx_local:
  ewsposter_local:

services:

  #########################################
  #### DEV
  #########################################
  #### T-Pot Init - Never delete this!
  #########################################

  # T-Pot Init Service
  tpotinit:
    container_name: tpotinit
    env_file:
      - .env
    restart: always
    stop_grace_period: 60s
    tmpfs:
      - /tmp/etc:uid=2000,gid=2000
      - /tmp/:uid=2000,gid=2000
    network_mode: "host"
    cap_add:
      - NET_ADMIN
    image: ${TPOT_REPO}/tpotinit:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    volumes:
      - ${TPOT_DOCKER_COMPOSE}:/tmp/tpot/docker-compose.yml:ro
      - ${TPOT_DATA_PATH}/blackhole:/etc/blackhole
      - ${TPOT_DATA_PATH}:/data
      - /var/run/docker.sock:/var/run/docker.sock:ro

  ##################
  #### Honeypots
  ##################

  # Ddospot service
  ddospot:
    container_name: ddospot
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    networks:
      - ddospot_local
    ports:
      - "19:19/udp"
      - "53:53/udp"
      - "123:123/udp"
      # - "161:161/udp"
      - "1900:1900/udp"
    image: ${TPOT_REPO}/ddospot:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/ddospot/log:/opt/ddospot/ddospot/logs
      - ${TPOT_DATA_PATH}/ddospot/bl:/opt/ddospot/ddospot/bl
      - ${TPOT_DATA_PATH}/ddospot/db:/opt/ddospot/ddospot/db

  # Endlessh service
  endlessh:
    container_name: endlessh
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    networks:
      - endlessh_local
    ports:
      - "22:2222"
    image: ${TPOT_REPO}/endlessh:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/endlessh/log:/var/log/endlessh

  # Go-pot service
  go-pot:
    container_name: go-pot
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    networks:
      - go-pot_local
    ports:
      - "8080:8080"
    image: ${TPOT_REPO}/go-pot:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/go-pot/log:/opt/go-pot/log/

  # Hellpot service
  hellpot:
    container_name: hellpot
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    networks:
      - hellpot_local
    ports:
      - "80:8080"
    image: ${TPOT_REPO}/hellpot:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/hellpot/log:/var/log/hellpot

  # Heralding service
  heralding:
    container_name: heralding
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    tmpfs:
      - /tmp/heralding:uid=2000,gid=2000
    networks:
      - heralding_local
    ports:
      - "21:21"
      # - "22:22"
      - "23:23"
      - "25:25"
      # - "80:80"
      - "110:110"
      - "143:143"
      - "443:443"
      - "465:465"
      - "993:993"
      - "995:995"
      - "3306:3306"
      - "3389:3389"
      - "1080:1080"
      - "5432:5432"
      - "5900:5900"
    image: ${TPOT_REPO}/heralding:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/heralding/log:/var/log/heralding
|
||||
|
||||
|
||||
##################
|
||||
#### NSM
|
||||
##################
|
||||
|
||||
# Fatt service
|
||||
fatt:
|
||||
container_name: fatt
|
||||
restart: always
|
||||
depends_on:
|
||||
tpotinit:
|
||||
condition: service_healthy
|
||||
network_mode: "host"
|
||||
cap_add:
|
||||
- NET_ADMIN
|
||||
- SYS_NICE
|
||||
- NET_RAW
|
||||
image: ${TPOT_REPO}/fatt:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
volumes:
|
||||
- ${TPOT_DATA_PATH}/fatt/log:/opt/fatt/log
|
||||
|
||||
# P0f service
|
||||
p0f:
|
||||
container_name: p0f
|
||||
restart: always
|
||||
depends_on:
|
||||
tpotinit:
|
||||
condition: service_healthy
|
||||
network_mode: "host"
|
||||
image: ${TPOT_REPO}/p0f:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
read_only: true
|
||||
volumes:
|
||||
- ${TPOT_DATA_PATH}/p0f/log:/var/log/p0f
|
||||
|
||||
# Suricata service
|
||||
suricata:
|
||||
container_name: suricata
|
||||
restart: always
|
||||
depends_on:
|
||||
tpotinit:
|
||||
condition: service_healthy
|
||||
environment:
|
||||
- OINKCODE=${OINKCODE:-OPEN} # Default to OPEN if unset or NULL (value provided by T-Pot .env)
|
||||
# Loading external Rules from URL
|
||||
# - FROMURL="https://username:password@yoururl.com|https://username:password@otherurl.com"
|
||||
network_mode: "host"
|
||||
cap_add:
|
||||
- NET_ADMIN
|
||||
- SYS_NICE
|
||||
- NET_RAW
|
||||
image: ${TPOT_REPO}/suricata:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
volumes:
|
||||
- ${TPOT_DATA_PATH}/suricata/log:/var/log/suricata
|
||||
|
||||
|
||||
##################
|
||||
#### Tools
|
||||
##################
|
||||
|
||||
#### ELK
|
||||
## Elasticsearch service
|
||||
elasticsearch:
|
||||
container_name: elasticsearch
|
||||
restart: always
|
||||
depends_on:
|
||||
tpotinit:
|
||||
condition: service_healthy
|
||||
networks:
|
||||
- nginx_local
|
||||
environment:
|
||||
- bootstrap.memory_lock=true
|
||||
- ES_JAVA_OPTS=-Xms2048m -Xmx2048m
|
||||
- ES_TMPDIR=/tmp
|
||||
cap_add:
|
||||
- IPC_LOCK
|
||||
ulimits:
|
||||
memlock:
|
||||
soft: -1
|
||||
hard: -1
|
||||
nofile:
|
||||
soft: 65536
|
||||
hard: 65536
|
||||
mem_limit: 4g
|
||||
ports:
|
||||
- "127.0.0.1:64298:9200"
|
||||
image: ${TPOT_REPO}/elasticsearch:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
volumes:
|
||||
- ${TPOT_DATA_PATH}:/data
|
||||
|
||||
## Kibana service
|
||||
kibana:
|
||||
container_name: kibana
|
||||
restart: always
|
||||
depends_on:
|
||||
elasticsearch:
|
||||
condition: service_healthy
|
||||
networks:
|
||||
- nginx_local
|
||||
mem_limit: 1g
|
||||
ports:
|
||||
- "127.0.0.1:64296:5601"
|
||||
image: ${TPOT_REPO}/kibana:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
|
||||
## Logstash service
|
||||
logstash:
|
||||
container_name: logstash
|
||||
restart: always
|
||||
depends_on:
|
||||
elasticsearch:
|
||||
condition: service_healthy
|
||||
networks:
|
||||
- nginx_local
|
||||
environment:
|
||||
- LS_JAVA_OPTS=-Xms1024m -Xmx1024m
|
||||
- TPOT_TYPE=${TPOT_TYPE:-HIVE}
|
||||
- TPOT_HIVE_USER=${TPOT_HIVE_USER}
|
||||
- TPOT_HIVE_IP=${TPOT_HIVE_IP}
|
||||
- LS_SSL_VERIFICATION=${LS_SSL_VERIFICATION:-full}
|
||||
ports:
|
||||
- "127.0.0.1:64305:64305"
|
||||
mem_limit: 2g
|
||||
image: ${TPOT_REPO}/logstash:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
volumes:
|
||||
- ${TPOT_DATA_PATH}:/data
|
||||
|
||||
## Map Redis Service
|
||||
map_redis:
|
||||
container_name: map_redis
|
||||
restart: always
|
||||
depends_on:
|
||||
tpotinit:
|
||||
condition: service_healthy
|
||||
networks:
|
||||
- nginx_local
|
||||
stop_signal: SIGKILL
|
||||
tty: true
|
||||
image: ${TPOT_REPO}/redis:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
read_only: true
|
||||
|
||||
## Map Web Service
|
||||
map_web:
|
||||
container_name: map_web
|
||||
restart: always
|
||||
depends_on:
|
||||
tpotinit:
|
||||
condition: service_healthy
|
||||
networks:
|
||||
- nginx_local
|
||||
environment:
|
||||
- MAP_COMMAND=AttackMapServer.py
|
||||
stop_signal: SIGKILL
|
||||
tty: true
|
||||
ports:
|
||||
- "127.0.0.1:64299:64299"
|
||||
image: ${TPOT_REPO}/map:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
|
||||
## Map Data Service
|
||||
map_data:
|
||||
container_name: map_data
|
||||
restart: always
|
||||
depends_on:
|
||||
elasticsearch:
|
||||
condition: service_healthy
|
||||
networks:
|
||||
- nginx_local
|
||||
environment:
|
||||
- MAP_COMMAND=DataServer_v2.py
|
||||
- TPOT_ATTACKMAP_TEXT=${TPOT_ATTACKMAP_TEXT}
|
||||
- TZ=${TPOT_ATTACKMAP_TEXT_TIMEZONE}
|
||||
stop_signal: SIGKILL
|
||||
tty: true
|
||||
image: ${TPOT_REPO}/map:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
#### /ELK
|
||||
|
||||
# Ewsposter service
|
||||
ewsposter:
|
||||
container_name: ewsposter
|
||||
restart: always
|
||||
depends_on:
|
||||
tpotinit:
|
||||
condition: service_healthy
|
||||
networks:
|
||||
- ewsposter_local
|
||||
environment:
|
||||
- EWS_HPFEEDS_ENABLE=false
|
||||
- EWS_HPFEEDS_HOST=host
|
||||
- EWS_HPFEEDS_PORT=port
|
||||
- EWS_HPFEEDS_CHANNELS=channels
|
||||
- EWS_HPFEEDS_IDENT=user
|
||||
- EWS_HPFEEDS_SECRET=secret
|
||||
- EWS_HPFEEDS_TLSCERT=false
|
||||
- EWS_HPFEEDS_FORMAT=json
|
||||
image: ${TPOT_REPO}/ewsposter:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
volumes:
|
||||
- ${TPOT_DATA_PATH}:/data
|
||||
- ${TPOT_DATA_PATH}/ews/conf/ews.ip:/opt/ewsposter/ews.ip
|
||||
|
||||
# Nginx service
|
||||
nginx:
|
||||
container_name: nginx
|
||||
restart: always
|
||||
environment:
|
||||
- TPOT_OSTYPE=${TPOT_OSTYPE}
|
||||
depends_on:
|
||||
tpotinit:
|
||||
condition: service_healthy
|
||||
tmpfs:
|
||||
- /var/tmp/nginx/client_body
|
||||
- /var/tmp/nginx/proxy
|
||||
- /var/tmp/nginx/fastcgi
|
||||
- /var/tmp/nginx/uwsgi
|
||||
- /var/tmp/nginx/scgi
|
||||
- /run
|
||||
- /var/lib/nginx/tmp:uid=100,gid=82
|
||||
networks:
|
||||
- nginx_local
|
||||
ports:
|
||||
- "64297:64297"
|
||||
- "64294:64294"
|
||||
image: ${TPOT_REPO}/nginx:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
read_only: true
|
||||
volumes:
|
||||
- ${TPOT_DATA_PATH}/nginx/cert/:/etc/nginx/cert/:ro
|
||||
- ${TPOT_DATA_PATH}/nginx/conf/nginxpasswd:/etc/nginx/nginxpasswd:ro
|
||||
- ${TPOT_DATA_PATH}/nginx/conf/lswebpasswd:/etc/nginx/lswebpasswd:ro
|
||||
- ${TPOT_DATA_PATH}/nginx/log/:/var/log/nginx/
|
||||
|
||||
# Spiderfoot service
|
||||
spiderfoot:
|
||||
container_name: spiderfoot
|
||||
restart: always
|
||||
depends_on:
|
||||
tpotinit:
|
||||
condition: service_healthy
|
||||
networks:
|
||||
- nginx_local
|
||||
ports:
|
||||
- "127.0.0.1:64303:8080"
|
||||
image: ${TPOT_REPO}/spiderfoot:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
volumes:
|
||||
- ${TPOT_DATA_PATH}/spiderfoot:/home/spiderfoot/.spiderfoot
|
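compose/tarpit.yml is a self-contained stack: a handful of tarpit honeypots plus the shared NSM and ELK tooling. A sketch of validating and starting it directly with docker compose (illustrative only; T-Pot's own installer normally selects which compose file becomes the active docker-compose.yml):

```bash
# Sketch: validate and run the TARPIT stack by hand, assuming a
# checked-out ~/tpotce with a populated .env in the working directory.
cd ~/tpotce
docker compose -f compose/tarpit.yml config --quiet && echo "tarpit.yml is valid"
docker compose -f compose/tarpit.yml up -d
```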
1111  compose/tpot_services.yml  Normal file

153  deploy.sh  Executable file
@@ -0,0 +1,153 @@
#!/usr/bin/env bash

myANSIBLE_PORT=64295
myANSIBLE_TPOT_PLAYBOOK="installer/install/deploy.yml"
myADJECTIVE=$(shuf -n1 installer/install/a.txt)
myNOUN=$(shuf -n1 installer/install/n.txt)
myENV_FILE="$HOME/tpotce/.env"

myDEPLOY=$(cat << "EOF"

 ____  [ T-Pot ]                  ____             _
/ ___|  ___ _ __  ___  ___  _ __ |  _ \  ___ _ __ | | ___  _   _
\___ \ / _ \ '_ \/ __|/ _ \| '__|| | | |/ _ \ '_ \| |/ _ \| | | |
 ___) |  __/ | | \__ \ (_) | |   | |_| |  __/ |_) | | (_) | |_| |
|____/ \___|_| |_|___/\___/|_|   |____/ \___| .__/|_|\___/ \__, |
                                            |_|            |___/

EOF
)

# Check if the script is running in a HIVE installation
if ! grep -q 'TPOT_TYPE=HIVE' "$HOME/tpotce/.env";
  then
    echo "# This script is only supported on HIVE installations."
    echo
    exit 1
fi

# Check if running on a supported distribution
mySUPPORTED_DISTRIBUTIONS=("AlmaLinux" "Debian GNU/Linux" "Fedora Linux" "openSUSE Tumbleweed" "Raspbian GNU/Linux" "Rocky Linux" "Ubuntu")
myCURRENT_DISTRIBUTION=$(awk -F= '/^NAME/{print $2}' /etc/os-release | tr -d '"')

if [[ ! " ${mySUPPORTED_DISTRIBUTIONS[@]} " =~ " ${myCURRENT_DISTRIBUTION} " ]];
  then
    echo "# Only the following distributions are supported: AlmaLinux, Fedora, Debian, openSUSE Tumbleweed, Rocky Linux and Ubuntu."
    echo
    exit 1
fi

echo "${myDEPLOY}"
echo
echo "# This script will prepare a T-Pot SENSOR installation to transmit logs into this HIVE."
echo

# Ask if a T-Pot SENSOR was installed
read -p "# Was a T-Pot SENSOR installed? (y/n): " mySENSOR_INSTALLED
if [[ ${mySENSOR_INSTALLED} != "y" ]];
  then
    echo "# A T-Pot SENSOR must be installed to continue."
    exit 1
fi

# Ask for the remote user
read -p "# Enter the remote username T-Pot SENSOR was installed with: " mySSHUSER
if [[ ${mySSHUSER} == "" ]];
  then
    echo "# You need to enter a user. Aborting."
    exit 1
fi

# Validate IP/domain name loop
while true; do
  read -p "# Enter the IP/domain name of the SENSOR: " mySENSOR_IP
  if [[ ${mySENSOR_IP} =~ ^([a-zA-Z0-9]+(\.[a-zA-Z0-9]+)*\.[a-zA-Z]{2,})|(([0-9]{1,3}\.){3}[0-9]{1,3})$ ]];
    then
      break
    else
      echo "# Invalid IP/domain. Please enter a valid IP or domain name."
  fi
done

# Check if ssh key has been deployed
read -p "# Has a SSH key been deployed to the SENSOR? (y/n): " mySSHKEY_DEPLOYED
if [[ ${mySSHKEY_DEPLOYED} != "y" ]];
  then
    echo "# Generate a SSH key using 'ssh-keygen' and deploy it to the SENSOR (Example: ssh-copy-id -p 64295 ${mySSHUSER}@${mySENSOR_IP})."
    exit 1
fi

# Validate IP/domain name of HIVE
while true; do
  read -p "# Enter the IP/domain name of this HIVE: " myTPOT_HIVE_IP
  if [[ ${myTPOT_HIVE_IP} =~ ^([a-zA-Z0-9]+(\.[a-zA-Z0-9]+)*\.[a-zA-Z]{2,})|(([0-9]{1,3}\.){3}[0-9]{1,3})$ ]];
    then
      break
    else
      echo "# Invalid IP/domain. Please enter a valid IP or domain name."
  fi
done

# Create a random SENSOR user name that is easily readable
myLS_WEB_USER="sensor-${myADJECTIVE}-${myNOUN}"

# Create a random password
myLS_WEB_PW=$(tr -dc 'a-zA-Z0-9' < /dev/urandom | fold -w 32 | head -n 1)

# Create myLS_WEB_USER_ENC
myLS_WEB_USER_ENC=$(htpasswd -b -n "${myLS_WEB_USER}" "${myLS_WEB_PW}")
myLS_WEB_USER_ENC_B64=$(echo -n "${myLS_WEB_USER_ENC}" | base64 -w0)

# Create myTPOT_HIVE_USER; since this is for Logstash on the SENSOR, it needs to be base64 encoded directly
myTPOT_HIVE_USER=$(echo -n "${myLS_WEB_USER}:${myLS_WEB_PW}" | base64 -w0)

# Print credentials
echo "# The following SENSOR credentials have been created:"
echo "# New SENSOR username: ${myLS_WEB_USER}"
echo "# New SENSOR password: ${myLS_WEB_PW}"
echo "# New htpasswd encoded credentials: ${myLS_WEB_USER_ENC}"
echo "# New htpasswd credentials base64 encoded: ${myLS_WEB_USER_ENC_B64}"
echo "# New SENSOR credentials base64 encoded: ${myTPOT_HIVE_USER}"
echo
echo "# Ansible will ask for the 'BECOME password' which is typically the password you 'sudo' with on the SENSOR."
echo "# The password will allow Ansible to run a reboot via sudo on the SENSOR."
echo

# Read LS_WEB_USER from file
myENV_LS_WEB_USER=$(grep "^LS_WEB_USER=" "${myENV_FILE}" | sed 's/^LS_WEB_USER=//g' | tr -d "\"'")

# Add the new SENSOR user
if [ "${myENV_LS_WEB_USER}" == "" ];
  then
    myENV_LS_WEB_USER="${myLS_WEB_USER_ENC_B64}"
  else
    myENV_LS_WEB_USER="${myENV_LS_WEB_USER} ${myLS_WEB_USER_ENC_B64}"
fi

# Need to export for Ansible
export myTPOT_HIVE_USER
export myTPOT_HIVE_IP

ANSIBLE_LOG_PATH=${HOME}/tpotce/data/deploy_sensor.log ansible-playbook ${myANSIBLE_TPOT_PLAYBOOK} -i ${mySENSOR_IP}, -c ssh -u ${mySSHUSER} --ask-become-pass -e "ansible_port=${myANSIBLE_PORT}"

if [ "$?" == 0 ];
  then
    # Update the T-Pot .env config and lswebpasswd (avoid the need to restart T-Pot) on the host
    echo "# Updating SENSOR users on this HIVE and in the T-Pot .env config:"
    sed -i "/^LS_WEB_USER=/c\LS_WEB_USER=$myENV_LS_WEB_USER" "${myENV_FILE}"
    : > "${HOME}"/tpotce/data/nginx/conf/lswebpasswd
    for i in $myENV_LS_WEB_USER;
      do
        if [[ -n $i ]];
          then
            # Need to control newlines as they kept coming up for some reason
            echo -n "$i" | base64 -d -w0
            echo
            echo -n "$i" | base64 -d -w0 | tr -d '\n' >> ${HOME}/tpotce/data/nginx/conf/lswebpasswd
            echo >> ${HOME}/tpotce/data/nginx/conf/lswebpasswd
        fi
    done
fi

unset myTPOT_HIVE_USER
unset myTPOT_HIVE_IP
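deploy.sh derives two encodings from a single generated password: an htpasswd entry for nginx basic auth on the HIVE, and a base64-encoded user:password pair consumed by Logstash on the SENSOR. A sketch of that chain with hypothetical values, including a round-trip check:

```bash
# Hypothetical values; the script generates these from word lists and /dev/urandom.
myLS_WEB_USER="sensor-brave-otter"
myLS_WEB_PW="A1b2C3d4E5f6G7h8I9j0K1l2M3n4O5p6"

# Encoding 1: htpasswd entry for nginx basic auth on the HIVE
myLS_WEB_USER_ENC=$(htpasswd -b -n "${myLS_WEB_USER}" "${myLS_WEB_PW}")

# Encoding 2: base64 user:password pair for Logstash on the SENSOR
myTPOT_HIVE_USER=$(echo -n "${myLS_WEB_USER}:${myLS_WEB_PW}" | base64 -w0)

# Round-trip check: decoding must reproduce the original user:password pair
echo -n "${myTPOT_HIVE_USER}" | base64 -d   # -> sensor-brave-otter:A1b2C3d4...
```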
Binary file changes (doc/):
  (modified image, name not shown): 408 KiB -> 443 KiB
  doc/attackmap.png (new): 480 KiB
  doc/cockpit1.png (removed): 140 KiB
  doc/cockpit2.png (removed): 185 KiB
  doc/cockpit3.png (removed): 336 KiB
  (modified image, name not shown): 101 KiB -> 117 KiB
  (removed image, name not shown): 368 KiB
  doc/dockerui.png (removed): 87 KiB
  doc/elasticvue.png (new): 174 KiB
  (removed image, name not shown): 127 KiB
  doc/heimdall.png (removed): 354 KiB
  doc/kibana.png (removed): 368 KiB
  doc/kibana_a.png (new): 608 KiB
  doc/kibana_b.png (new): 98 KiB
  doc/kibana_c.png (new): 310 KiB
  doc/netdata.png (removed): 199 KiB
  (modified image, name not shown): 133 KiB -> 162 KiB
  doc/tpotwebui.png (new): 486 KiB
  doc/webssh.png (removed): 148 KiB
856  docker-compose.yml  Normal file
@@ -0,0 +1,856 @@
# T-Pot: STANDARD
networks:
  adbhoney_local:
  ciscoasa_local:
  conpot_local_IEC104:
  conpot_local_guardian_ast:
  conpot_local_ipmi:
  conpot_local_kamstrup_382:
  cowrie_local:
  dicompot_local:
  dionaea_local:
  elasticpot_local:
  h0neytr4p_local:
  heralding_local:
  honeyaml_local:
  ipphoney_local:
  mailoney_local:
  medpot_local:
  miniprint_local:
  redishoneypot_local:
  sentrypeer_local:
  tanner_local:
  wordpot_local:
  nginx_local:
  ewsposter_local:

services:

#########################################
#### DEV
#########################################
#### T-Pot Init - Never delete this!
#########################################

  # T-Pot Init Service
  tpotinit:
    container_name: tpotinit
    env_file:
      - .env
    restart: always
    stop_grace_period: 60s
    tmpfs:
      - /tmp/etc:uid=2000,gid=2000
      - /tmp/:uid=2000,gid=2000
    network_mode: "host"
    cap_add:
      - NET_ADMIN
    image: ${TPOT_REPO}/tpotinit:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    volumes:
      - ${TPOT_DOCKER_COMPOSE}:/tmp/tpot/docker-compose.yml:ro
      - ${TPOT_DATA_PATH}/blackhole:/etc/blackhole
      - ${TPOT_DATA_PATH}:/data
      - /var/run/docker.sock:/var/run/docker.sock:ro


##################
#### Honeypots
##################

  # Adbhoney service
  adbhoney:
    container_name: adbhoney
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    networks:
      - adbhoney_local
    ports:
      - "5555:5555"
    image: ${TPOT_REPO}/adbhoney:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/adbhoney/log:/opt/adbhoney/log
      - ${TPOT_DATA_PATH}/adbhoney/downloads:/opt/adbhoney/dl

  # Ciscoasa service
  ciscoasa:
    container_name: ciscoasa
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    tmpfs:
      - /tmp/ciscoasa:uid=2000,gid=2000
    networks:
      - ciscoasa_local
    ports:
      - "5000:5000/udp"
      - "8443:8443"
    image: ${TPOT_REPO}/ciscoasa:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/ciscoasa/log:/var/log/ciscoasa

  # Conpot IEC104 service
  conpot_IEC104:
    container_name: conpot_iec104
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    environment:
      - CONPOT_CONFIG=/etc/conpot/conpot.cfg
      - CONPOT_JSON_LOG=/var/log/conpot/conpot_IEC104.json
      - CONPOT_LOG=/var/log/conpot/conpot_IEC104.log
      - CONPOT_TEMPLATE=IEC104
      - CONPOT_TMP=/tmp/conpot
    tmpfs:
      - /tmp/conpot:uid=2000,gid=2000
    networks:
      - conpot_local_IEC104
    ports:
      - "161:161/udp"
      - "2404:2404"
    image: ${TPOT_REPO}/conpot:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/conpot/log:/var/log/conpot

  # Conpot guardian_ast service
  conpot_guardian_ast:
    container_name: conpot_guardian_ast
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    environment:
      - CONPOT_CONFIG=/etc/conpot/conpot.cfg
      - CONPOT_JSON_LOG=/var/log/conpot/conpot_guardian_ast.json
      - CONPOT_LOG=/var/log/conpot/conpot_guardian_ast.log
      - CONPOT_TEMPLATE=guardian_ast
      - CONPOT_TMP=/tmp/conpot
    tmpfs:
      - /tmp/conpot:uid=2000,gid=2000
    networks:
      - conpot_local_guardian_ast
    ports:
      - "10001:10001"
    image: ${TPOT_REPO}/conpot:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/conpot/log:/var/log/conpot

  # Conpot ipmi
  conpot_ipmi:
    container_name: conpot_ipmi
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    environment:
      - CONPOT_CONFIG=/etc/conpot/conpot.cfg
      - CONPOT_JSON_LOG=/var/log/conpot/conpot_ipmi.json
      - CONPOT_LOG=/var/log/conpot/conpot_ipmi.log
      - CONPOT_TEMPLATE=ipmi
      - CONPOT_TMP=/tmp/conpot
    tmpfs:
      - /tmp/conpot:uid=2000,gid=2000
    networks:
      - conpot_local_ipmi
    ports:
      - "623:623/udp"
    image: ${TPOT_REPO}/conpot:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/conpot/log:/var/log/conpot

  # Conpot kamstrup_382
  conpot_kamstrup_382:
    container_name: conpot_kamstrup_382
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    environment:
      - CONPOT_CONFIG=/etc/conpot/conpot.cfg
      - CONPOT_JSON_LOG=/var/log/conpot/conpot_kamstrup_382.json
      - CONPOT_LOG=/var/log/conpot/conpot_kamstrup_382.log
      - CONPOT_TEMPLATE=kamstrup_382
      - CONPOT_TMP=/tmp/conpot
    tmpfs:
      - /tmp/conpot:uid=2000,gid=2000
    networks:
      - conpot_local_kamstrup_382
    ports:
      - "1025:1025"
      - "50100:50100"
    image: ${TPOT_REPO}/conpot:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/conpot/log:/var/log/conpot

  # Cowrie service
  cowrie:
    container_name: cowrie
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    tmpfs:
      - /tmp/cowrie:uid=2000,gid=2000
      - /tmp/cowrie/data:uid=2000,gid=2000
    networks:
      - cowrie_local
    ports:
      - "22:22"
      - "23:23"
    image: ${TPOT_REPO}/cowrie:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/cowrie/downloads:/home/cowrie/cowrie/dl
      - ${TPOT_DATA_PATH}/cowrie/keys:/home/cowrie/cowrie/etc
      - ${TPOT_DATA_PATH}/cowrie/log:/home/cowrie/cowrie/log
      - ${TPOT_DATA_PATH}/cowrie/log/tty:/home/cowrie/cowrie/log/tty

  # Dicompot service
  # Get the Horos Client for testing: https://horosproject.org/
  # Get Dicom images (CC BY 3.0): https://www.cancerimagingarchive.net/collections/
  # Put images (which must be in Dicom DCM format or it will not work!) into /data/dicompot/images
  dicompot:
    container_name: dicompot
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    networks:
      - dicompot_local
    ports:
      - "104:11112"
      - "11112:11112"
    image: ${TPOT_REPO}/dicompot:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/dicompot/log:/var/log/dicompot
      # - ${TPOT_DATA_PATH}/dicompot/images:/opt/dicompot/images

  # Dionaea service
  dionaea:
    container_name: dionaea
    stdin_open: true
    tty: true
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    networks:
      - dionaea_local
    ports:
      - "20:20"
      - "21:21"
      - "42:42"
      - "69:69/udp"
      - "81:81"
      - "135:135"
      # - "443:443"
      - "445:445"
      - "1433:1433"
      - "1723:1723"
      - "1883:1883"
      - "3306:3306"
      # - "5060:5060"
      # - "5060:5060/udp"
      # - "5061:5061"
      - "27017:27017"
    image: ${TPOT_REPO}/dionaea:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/dionaea/roots/ftp:/opt/dionaea/var/dionaea/roots/ftp
      - ${TPOT_DATA_PATH}/dionaea/roots/tftp:/opt/dionaea/var/dionaea/roots/tftp
      - ${TPOT_DATA_PATH}/dionaea/roots/www:/opt/dionaea/var/dionaea/roots/www
      - ${TPOT_DATA_PATH}/dionaea/roots/upnp:/opt/dionaea/var/dionaea/roots/upnp
      - ${TPOT_DATA_PATH}/dionaea:/opt/dionaea/var/dionaea
      - ${TPOT_DATA_PATH}/dionaea/binaries:/opt/dionaea/var/dionaea/binaries
      - ${TPOT_DATA_PATH}/dionaea/log:/opt/dionaea/var/log
      - ${TPOT_DATA_PATH}/dionaea/rtp:/opt/dionaea/var/dionaea/rtp

  # ElasticPot service
  elasticpot:
    container_name: elasticpot
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    networks:
      - elasticpot_local
    ports:
      - "9200:9200"
    image: ${TPOT_REPO}/elasticpot:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/elasticpot/log:/opt/elasticpot/log

  # H0neytr4p service
  h0neytr4p:
    container_name: h0neytr4p
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    networks:
      - h0neytr4p_local
    ports:
      - "443:443"
      # - "80:80"
    image: ${TPOT_REPO}/h0neytr4p:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/h0neytr4p/log/:/opt/h0neytr4p/log/
      - ${TPOT_DATA_PATH}/h0neytr4p/payloads/:/data/h0neytr4p/payloads/

  # Heralding service
  heralding:
    container_name: heralding
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    tmpfs:
      - /tmp/heralding:uid=2000,gid=2000
    networks:
      - heralding_local
    ports:
      # - "21:21"
      # - "22:22"
      # - "23:23"
      # - "25:25"
      # - "80:80"
      - "110:110"
      - "143:143"
      # - "443:443"
      - "465:465"
      - "993:993"
      - "995:995"
      # - "3306:3306"
      # - "3389:3389"
      - "1080:1080"
      - "5432:5432"
      - "5900:5900"
    image: ${TPOT_REPO}/heralding:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/heralding/log:/var/log/heralding

  # Honeyaml service
  honeyaml:
    container_name: honeyaml
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    networks:
      - honeyaml_local
    ports:
      - "3000:8080"
    image: ${TPOT_REPO}/honeyaml:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/honeyaml/log:/opt/honeyaml/log/

  # Honeytrap service
  honeytrap:
    container_name: honeytrap
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    tmpfs:
      - /tmp/honeytrap:uid=2000,gid=2000
    network_mode: "host"
    cap_add:
      - NET_ADMIN
    image: ${TPOT_REPO}/honeytrap:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/honeytrap/attacks:/opt/honeytrap/var/attacks
      - ${TPOT_DATA_PATH}/honeytrap/downloads:/opt/honeytrap/var/downloads
      - ${TPOT_DATA_PATH}/honeytrap/log:/opt/honeytrap/var/log

  # Ipphoney service
  ipphoney:
    container_name: ipphoney
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    networks:
      - ipphoney_local
    ports:
      - "631:631"
    image: ${TPOT_REPO}/ipphoney:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/ipphoney/log:/opt/ipphoney/log

  # Mailoney service
  mailoney:
    container_name: mailoney
    stdin_open: true
    tty: true
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    networks:
      - mailoney_local
    ports:
      - "25:25"
      - "587:25"
    image: ${TPOT_REPO}/mailoney:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/mailoney/log:/opt/mailoney/logs

  # Medpot service
  medpot:
    container_name: medpot
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    networks:
      - medpot_local
    ports:
      - "2575:2575"
    image: ${TPOT_REPO}/medpot:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/medpot/log/:/var/log/medpot

  # Miniprint service
  miniprint:
    container_name: miniprint
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    networks:
      - miniprint_local
    ports:
      - "9100:9100"
    image: ${TPOT_REPO}/miniprint:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/miniprint/log/:/opt/miniprint/log/
      - ${TPOT_DATA_PATH}/miniprint/uploads/:/opt/miniprint/uploads/

  # Redishoneypot service
  redishoneypot:
    container_name: redishoneypot
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    networks:
      - redishoneypot_local
    ports:
      - "6379:6379"
    image: ${TPOT_REPO}/redishoneypot:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/redishoneypot/log:/var/log/redishoneypot

  # SentryPeer service
  sentrypeer:
    container_name: sentrypeer
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    # environment:
    #   - SENTRYPEER_PEER_TO_PEER=1
    networks:
      - sentrypeer_local
    ports:
      # - "4222:4222/udp"
      - "5060:5060/tcp"
      - "5060:5060/udp"
      # - "127.0.0.1:8082:8082"
    image: ${TPOT_REPO}/sentrypeer:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/sentrypeer/log:/var/log/sentrypeer

#### Snare / Tanner
  ## Tanner Redis Service
  tanner_redis:
    container_name: tanner_redis
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    tty: true
    networks:
      - tanner_local
    image: ${TPOT_REPO}/redis:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true

  ## PHP Sandbox service
  tanner_phpox:
    container_name: tanner_phpox
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    tty: true
    networks:
      - tanner_local
    image: ${TPOT_REPO}/phpox:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true

  ## Tanner API Service
  tanner_api:
    container_name: tanner_api
    restart: always
    depends_on:
      - tanner_redis
    tmpfs:
      - /tmp/tanner:uid=2000,gid=2000
    tty: true
    networks:
      - tanner_local
    image: ${TPOT_REPO}/tanner:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/tanner/log:/var/log/tanner
    command: tannerapi

  ## Tanner Service
  tanner:
    container_name: tanner
    restart: always
    depends_on:
      - tanner_api
      - tanner_phpox
    tmpfs:
      - /tmp/tanner:uid=2000,gid=2000
    tty: true
    networks:
      - tanner_local
    image: ${TPOT_REPO}/tanner:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    command: tanner
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/tanner/log:/var/log/tanner
      - ${TPOT_DATA_PATH}/tanner/files:/opt/tanner/files

  ## Snare Service
  snare:
    container_name: snare
    restart: always
    depends_on:
      - tanner
    tty: true
    networks:
      - tanner_local
    ports:
      - "80:80"
    image: ${TPOT_REPO}/snare:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}

  # Wordpot service
  wordpot:
    container_name: wordpot
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    networks:
      - wordpot_local
    ports:
      - "8080:80"
    image: ${TPOT_REPO}/wordpot:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/wordpot/log:/opt/wordpot/logs/


##################
#### NSM
##################

  # Fatt service
  fatt:
    container_name: fatt
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    network_mode: "host"
    cap_add:
      - NET_ADMIN
      - SYS_NICE
      - NET_RAW
    image: ${TPOT_REPO}/fatt:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    volumes:
      - ${TPOT_DATA_PATH}/fatt/log:/opt/fatt/log

  # P0f service
  p0f:
    container_name: p0f
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    network_mode: "host"
    image: ${TPOT_REPO}/p0f:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/p0f/log:/var/log/p0f

  # Suricata service
  suricata:
    container_name: suricata
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    environment:
      - OINKCODE=${OINKCODE:-OPEN} # Default to OPEN if unset or NULL (value provided by T-Pot .env)
      # Loading external Rules from URL
      # - FROMURL="https://username:password@yoururl.com|https://username:password@otherurl.com"
    network_mode: "host"
    cap_add:
      - NET_ADMIN
      - SYS_NICE
      - NET_RAW
    image: ${TPOT_REPO}/suricata:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    volumes:
      - ${TPOT_DATA_PATH}/suricata/log:/var/log/suricata


##################
#### Tools
##################

#### ELK
  ## Elasticsearch service
  elasticsearch:
    container_name: elasticsearch
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    networks:
      - nginx_local
    environment:
      - bootstrap.memory_lock=true
      - ES_JAVA_OPTS=-Xms2048m -Xmx2048m
      - ES_TMPDIR=/tmp
    cap_add:
      - IPC_LOCK
    ulimits:
      memlock:
        soft: -1
        hard: -1
      nofile:
        soft: 65536
        hard: 65536
    mem_limit: 4g
    ports:
      - "127.0.0.1:64298:9200"
    image: ${TPOT_REPO}/elasticsearch:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    volumes:
      - ${TPOT_DATA_PATH}:/data

  ## Kibana service
  kibana:
    container_name: kibana
    restart: always
    depends_on:
      elasticsearch:
        condition: service_healthy
    networks:
      - nginx_local
    mem_limit: 1g
    ports:
      - "127.0.0.1:64296:5601"
    image: ${TPOT_REPO}/kibana:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}

  ## Logstash service
  logstash:
    container_name: logstash
    restart: always
    depends_on:
      elasticsearch:
        condition: service_healthy
    networks:
      - nginx_local
    environment:
      - LS_JAVA_OPTS=-Xms1024m -Xmx1024m
      - TPOT_TYPE=${TPOT_TYPE:-HIVE}
      - TPOT_HIVE_USER=${TPOT_HIVE_USER}
      - TPOT_HIVE_IP=${TPOT_HIVE_IP}
      - LS_SSL_VERIFICATION=${LS_SSL_VERIFICATION:-full}
    ports:
      - "127.0.0.1:64305:64305"
    mem_limit: 2g
    image: ${TPOT_REPO}/logstash:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    volumes:
      - ${TPOT_DATA_PATH}:/data

  ## Map Redis Service
  map_redis:
    container_name: map_redis
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    networks:
      - nginx_local
    stop_signal: SIGKILL
    tty: true
    image: ${TPOT_REPO}/redis:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true

  ## Map Web Service
  map_web:
    container_name: map_web
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    networks:
      - nginx_local
    environment:
      - MAP_COMMAND=AttackMapServer.py
    stop_signal: SIGKILL
    tty: true
    ports:
      - "127.0.0.1:64299:64299"
    image: ${TPOT_REPO}/map:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}

  ## Map Data Service
  map_data:
    container_name: map_data
    restart: always
    depends_on:
      elasticsearch:
        condition: service_healthy
    networks:
      - nginx_local
    environment:
      - MAP_COMMAND=DataServer_v2.py
      - TPOT_ATTACKMAP_TEXT=${TPOT_ATTACKMAP_TEXT}
      - TZ=${TPOT_ATTACKMAP_TEXT_TIMEZONE}
    stop_signal: SIGKILL
    tty: true
    image: ${TPOT_REPO}/map:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
#### /ELK

  # Ewsposter service
  ewsposter:
    container_name: ewsposter
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    networks:
      - ewsposter_local
    environment:
      - EWS_HPFEEDS_ENABLE=false
      - EWS_HPFEEDS_HOST=host
      - EWS_HPFEEDS_PORT=port
      - EWS_HPFEEDS_CHANNELS=channels
      - EWS_HPFEEDS_IDENT=user
      - EWS_HPFEEDS_SECRET=secret
      - EWS_HPFEEDS_TLSCERT=false
      - EWS_HPFEEDS_FORMAT=json
    image: ${TPOT_REPO}/ewsposter:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    volumes:
      - ${TPOT_DATA_PATH}:/data
      - ${TPOT_DATA_PATH}/ews/conf/ews.ip:/opt/ewsposter/ews.ip

  # Nginx service
  nginx:
    container_name: nginx
    restart: always
    environment:
      - TPOT_OSTYPE=${TPOT_OSTYPE}
    depends_on:
      tpotinit:
        condition: service_healthy
    tmpfs:
      - /var/tmp/nginx/client_body
      - /var/tmp/nginx/proxy
      - /var/tmp/nginx/fastcgi
      - /var/tmp/nginx/uwsgi
      - /var/tmp/nginx/scgi
      - /run
      - /var/lib/nginx/tmp:uid=100,gid=82
    networks:
      - nginx_local
    ports:
      - "64297:64297"
      - "64294:64294"
    image: ${TPOT_REPO}/nginx:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/nginx/cert/:/etc/nginx/cert/:ro
      - ${TPOT_DATA_PATH}/nginx/conf/nginxpasswd:/etc/nginx/nginxpasswd:ro
      - ${TPOT_DATA_PATH}/nginx/conf/lswebpasswd:/etc/nginx/lswebpasswd:ro
      - ${TPOT_DATA_PATH}/nginx/log/:/var/log/nginx/

  # Spiderfoot service
  spiderfoot:
    container_name: spiderfoot
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    networks:
      - nginx_local
    ports:
      - "127.0.0.1:64303:8080"
    image: ${TPOT_REPO}/spiderfoot:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    volumes:
      - ${TPOT_DATA_PATH}/spiderfoot:/home/spiderfoot/.spiderfoot
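The STANDARD stack above is driven entirely by variables interpolated from .env. A minimal sketch of the variables every service needs before the file renders (values are illustrative, not the shipped defaults):

```bash
# Illustrative .env values for the compose file above; the shipped
# env.example carries the authoritative defaults.
cat > .env <<'EOF'
TPOT_REPO=ghcr.io/telekom-security
TPOT_VERSION=24.04.1
TPOT_PULL_POLICY=always
TPOT_DATA_PATH=./data
TPOT_DOCKER_COMPOSE=./docker-compose.yml
TPOT_OSTYPE=linux
EOF
docker compose config --quiet && echo "docker-compose.yml renders cleanly"
```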
23  docker/_builder/.env  Normal file
@@ -0,0 +1,23 @@
# T-Pot builder config file. Do not remove.

##########################
# T-Pot Builder Settings #
##########################

# docker compose .env
TPOT_DOCKER_ENV=./.env

# Docker-Compose file
TPOT_DOCKER_COMPOSE=./docker-compose.yml

# T-Pot Repos
TPOT_DOCKER_REPO=dtagdevsec
TPOT_GHCR_REPO=ghcr.io/telekom-security

# T-Pot Version Tag
TPOT_VERSION=24.04.1

# T-Pot platforms (architectures)
# Most docker features are available on linux
TPOT_AMD64=linux/amd64
TPOT_ARM64=linux/arm64
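These variables feed the builder's compose file, shown further below. A quick way to confirm they resolve into the expected image tags before kicking off a build:

```bash
# Sketch: render the builder compose file and spot-check the image tags.
cd docker/_builder
docker compose config | grep 'image:' | sort -u | head
# expect lines such as:  image: dtagdevsec/adbhoney:24.04.1
```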
202  docker/_builder/builder.sh  Executable file
@@ -0,0 +1,202 @@
#!/usr/bin/env bash

# Got root?
myWHOAMI=$(whoami)
if [ "$myWHOAMI" != "root" ]
  then
    echo "Need to run as root ..."
    exit
fi

# ANSI color codes for green (OK) and red (FAIL)
GREEN='\033[0;32m'
RED='\033[0;31m'
NC='\033[0m' # No Color

# Default settings
PUSH_IMAGES=false
NO_CACHE=false
PARALLELBUILDS=2
UPLOAD_BANDWIDTH=40mbit # Set this to max 90% of available upload bandwidth
INTERFACE=$(ip route | grep "^default" | awk '{ print $5 }')

# Help message
usage() {
  echo "Usage: $0 [-p] [-n] [-h]"
  echo "  -p  Push images after building"
  echo "  -n  Build images with --no-cache"
  echo "  -h  Show help message"
  exit 1
}

# Parse command-line options
while getopts ":pnh" opt; do
  case ${opt} in
    p )
      PUSH_IMAGES=true
      docker login
      docker login ghcr.io
      ;;
    n )
      NO_CACHE=true
      ;;
    h )
      usage
      ;;
    \? )
      echo "Invalid option: $OPTARG" 1>&2
      usage
      ;;
  esac
done

# Function to apply upload bandwidth limit using tc
apply_bandwidth_limit() {
  echo -n "Applying upload bandwidth limit of $UPLOAD_BANDWIDTH on interface $INTERFACE..."
  if tc qdisc add dev $INTERFACE root tbf rate $UPLOAD_BANDWIDTH burst 32kbit latency 400ms >/dev/null 2>&1; then
    echo -e " [${GREEN}OK${NC}]"
  else
    echo -e " [${RED}FAIL${NC}]"
    remove_bandwidth_limit

    # Try to reapply the limit
    echo -n "Reapplying upload bandwidth limit of $UPLOAD_BANDWIDTH on interface $INTERFACE..."
    if tc qdisc add dev $INTERFACE root tbf rate $UPLOAD_BANDWIDTH burst 32kbit latency 400ms >/dev/null 2>&1; then
      echo -e " [${GREEN}OK${NC}]"
    else
      echo -e " [${RED}FAIL${NC}]"
      echo "Failed to apply bandwidth limit on $INTERFACE. Exiting."
      echo
      exit 1
    fi
  fi
}

# Function to check if the bandwidth limit is set
is_bandwidth_limit_set() {
  tc qdisc show dev $INTERFACE | grep -q 'tbf'
}

# Function to remove the bandwidth limit using tc if it is set
remove_bandwidth_limit() {
  if is_bandwidth_limit_set; then
    echo -n "Removing upload bandwidth limit on interface $INTERFACE..."
    if tc qdisc del dev $INTERFACE root; then
      echo -e " [${GREEN}OK${NC}]"
    else
      echo -e " [${RED}FAIL${NC}]"
    fi
  fi
}

echo "###########################"
echo "# T-Pot Image Builder"
echo "###########################"
echo

# Check if 'mybuilder' exists, and ensure it's running with bootstrap
echo -n "Checking if buildx builder 'mybuilder' exists and is running..."
if ! docker buildx inspect mybuilder --bootstrap >/dev/null 2>&1; then
  echo
  echo -n "  Creating and starting buildx builder 'mybuilder'..."
  if docker buildx create --name mybuilder --driver docker-container --use >/dev/null 2>&1 && \
     docker buildx inspect mybuilder --bootstrap >/dev/null 2>&1; then
    echo -e " [${GREEN}OK${NC}]"
  else
    echo -e " [${RED}FAIL${NC}]"
    exit 1
  fi
else
  echo -e " [${GREEN}OK${NC}]"
fi

# Ensure arm64 and amd64 platforms are active
echo -n "Ensuring 'mybuilder' supports linux/arm64 and linux/amd64..."

# Get active platforms from buildx
active_platforms=$(docker buildx inspect mybuilder --bootstrap | grep -oP '(?<=Platforms: ).*')

if [[ "$active_platforms" == *"linux/arm64"* && "$active_platforms" == *"linux/amd64"* ]]; then
  echo -e " [${GREEN}OK${NC}]"
else
  echo
  echo -n "  Enabling platforms linux/arm64 and linux/amd64..."
  if docker buildx create --name mybuilder --driver docker-container --use --platform linux/amd64,linux/arm64 >/dev/null 2>&1 && \
     docker buildx inspect mybuilder --bootstrap >/dev/null 2>&1; then
    echo -e " [${GREEN}OK${NC}]"
  else
    echo -e " [${RED}FAIL${NC}]"
    exit 1
  fi
fi

# Ensure QEMU is set up for cross-platform builds
echo -n "Ensuring QEMU is configured for cross-platform builds..."
if docker run --rm --privileged multiarch/qemu-user-static --reset -p yes > /dev/null 2>&1; then
  echo -e " [${GREEN}OK${NC}]"
else
  echo -e " [${RED}FAIL${NC}]"
fi

# Apply bandwidth limit only if pushing images
if $PUSH_IMAGES; then
  echo
  echo "########################################"
  echo "# Setting Upload Bandwidth limit ..."
  echo "########################################"
  echo
  apply_bandwidth_limit
fi

# Trap to ensure bandwidth limit is removed on script error, exit
trap_cleanup() {
  if is_bandwidth_limit_set; then
    remove_bandwidth_limit
  fi
}
trap trap_cleanup INT ERR EXIT

echo
echo "################################"
echo "# Now building images ..."
echo "################################"
echo

mkdir -p log

# List of services to build
services=$(docker compose config --services | sort)

# Loop through each service to build
echo $services | tr ' ' '\n' | xargs -I {} -P $PARALLELBUILDS bash -c '
  echo "Building image: {}" && \
  build_cmd="docker compose build {}" && \
  if '$PUSH_IMAGES'; then \
    build_cmd="$build_cmd --push"; \
  fi && \
  if '$NO_CACHE'; then \
    build_cmd="$build_cmd --no-cache"; \
  fi && \
  eval "$build_cmd 2>&1 > log/{}.log" && \
  echo -e "Image {}: ['$GREEN'OK'$NC']" || \
  echo -e "Image {}: ['$RED'FAIL'$NC']"
'

# Remove bandwidth limit if it was applied
if is_bandwidth_limit_set; then
  echo
  echo "########################################"
  echo "# Removing Upload Bandwidth limit ..."
  echo "########################################"
  echo
  remove_bandwidth_limit
fi

echo
echo "#######################################################"
echo "# Done."
if ! "$PUSH_IMAGES"; then
  echo "# Remember to push the images using the push option."
fi
echo "#######################################################"
echo
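builder.sh shapes upload traffic with a tc token bucket filter (tbf) while pushing, and a trap is meant to remove it on exit. A sketch for inspecting or clearing the limit by hand, using the same default-route interface detection, in case a build is interrupted hard enough that the trap never fires:

```bash
# Sketch: inspect / remove the upload limit applied by builder.sh.
IFACE=$(ip route | awk '/^default/ { print $5 }')
tc qdisc show dev "$IFACE"            # a 'tbf' entry means the limit is still active
sudo tc qdisc del dev "$IFACE" root   # drop it manually if needed
```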
421
docker/_builder/docker-compose.yml
Normal file
|
@ -0,0 +1,421 @@
|
|||
# T-Pot Docker Compose Image Builder (use only for building docker images)
|
||||
# Settings in .env
|
||||
|
||||
##################
|
||||
#### Anchors
|
||||
##################
|
||||
|
||||
# Common build config
|
||||
x-common-build: &common-build
|
||||
dockerfile: ./Dockerfile
|
||||
platforms:
|
||||
- ${TPOT_AMD64}
|
||||
- ${TPOT_ARM64}
|
||||
|
||||
services:
|
||||
|
||||
##################
|
||||
#### Honeypots
|
||||
##################
|
||||
|
||||
# Adbhoney
|
||||
adbhoney:
|
||||
image: ${TPOT_DOCKER_REPO}/adbhoney:${TPOT_VERSION}
|
||||
build:
|
||||
tags:
|
||||
- ${TPOT_GHCR_REPO}/adbhoney:${TPOT_VERSION}
|
||||
context: ../adbhoney/
|
||||
<<: *common-build
|
||||
|
||||
# Beelzebub
|
||||
beelzebub:
|
||||
image: ${TPOT_DOCKER_REPO}/beelzebub:${TPOT_VERSION}
|
||||
build:
|
||||
tags:
|
||||
- ${TPOT_GHCR_REPO}/beelzebub:${TPOT_VERSION}
|
||||
context: ../beelzebub/
|
||||
<<: *common-build
|
||||
|
||||
# Ciscoasa
|
||||
ciscoasa:
|
||||
image: ${TPOT_DOCKER_REPO}/ciscoasa:${TPOT_VERSION}
|
||||
build:
|
||||
tags:
|
||||
- ${TPOT_GHCR_REPO}/ciscoasa:${TPOT_VERSION}
|
||||
context: ../ciscoasa/
|
||||
<<: *common-build
|
||||
|
||||
# Citrixhoneypot
|
||||
citrixhoneypot:
|
||||
image: ${TPOT_DOCKER_REPO}/citrixhoneypot:${TPOT_VERSION}
|
||||
build:
|
||||
tags:
|
||||
- ${TPOT_GHCR_REPO}/citrixhoneypot:${TPOT_VERSION}
|
||||
context: ../citrixhoneypot/
|
||||
<<: *common-build
|
||||
|
||||
# Conpot
|
||||
conpot:
|
||||
image: ${TPOT_DOCKER_REPO}/conpot:${TPOT_VERSION}
|
||||
build:
|
||||
tags:
|
||||
- ${TPOT_GHCR_REPO}/conpot:${TPOT_VERSION}
|
||||
context: ../conpot/
|
||||
<<: *common-build
|
||||
|
||||
# Cowrie
|
||||
cowrie:
|
||||
image: ${TPOT_DOCKER_REPO}/cowrie:${TPOT_VERSION}
|
||||
build:
|
||||
tags:
|
||||
- ${TPOT_GHCR_REPO}/cowrie:${TPOT_VERSION}
|
||||
context: ../cowrie/
|
||||
<<: *common-build
|
||||
|
||||
# Ddospot
|
||||
ddospot:
|
||||
image: ${TPOT_DOCKER_REPO}/ddospot:${TPOT_VERSION}
|
||||
build:
|
||||
tags:
|
||||
- ${TPOT_GHCR_REPO}/ddospot:${TPOT_VERSION}
|
||||
context: ../ddospot/
|
||||
<<: *common-build
|
||||
|
||||
# Dicompot
|
||||
dicompot:
|
||||
image: ${TPOT_DOCKER_REPO}/dicompot:${TPOT_VERSION}
|
||||
build:
|
||||
tags:
|
||||
- ${TPOT_GHCR_REPO}/dicompot:${TPOT_VERSION}
|
||||
context: ../dicompot/
|
||||
<<: *common-build
|
||||
|
||||
# Dionaea
|
||||
dionaea:
|
||||
image: ${TPOT_DOCKER_REPO}/dionaea:${TPOT_VERSION}
|
||||
build:
|
||||
tags:
|
||||
- ${TPOT_GHCR_REPO}/dionaea:${TPOT_VERSION}
|
||||
context: ../dionaea/
|
||||
<<: *common-build
|
||||
|
||||
# Elasticpot
|
||||
elasticpot:
|
||||
image: ${TPOT_DOCKER_REPO}/elasticpot:${TPOT_VERSION}
|
||||
build:
|
||||
tags:
|
||||
- ${TPOT_GHCR_REPO}/elasticpot:${TPOT_VERSION}
|
||||
context: ../elasticpot/
|
||||
<<: *common-build
|
||||
|
||||
# Endlessh
|
||||
endlessh:
|
||||
image: ${TPOT_DOCKER_REPO}/endlessh:${TPOT_VERSION}
|
||||
build:
|
||||
tags:
|
||||
- ${TPOT_GHCR_REPO}/endlessh:${TPOT_VERSION}
|
||||
context: ../endlessh/
|
||||
<<: *common-build
|
||||
|
||||
# Galah
|
||||
galah:
|
||||
image: ${TPOT_DOCKER_REPO}/galah:${TPOT_VERSION}
|
||||
build:
|
||||
tags:
|
||||
- ${TPOT_GHCR_REPO}/galah:${TPOT_VERSION}
|
||||
context: ../galah/
|
||||
<<: *common-build
|
||||
|
||||
# Glutton
|
||||
glutton:
|
||||
image: ${TPOT_DOCKER_REPO}/glutton:${TPOT_VERSION}
|
||||
build:
|
||||
tags:
|
||||
- ${TPOT_GHCR_REPO}/glutton:${TPOT_VERSION}
|
||||
context: ../glutton/
|
||||
<<: *common-build
|
||||
|
||||
# Go-pot
|
||||
go-pot:
|
||||
image: ${TPOT_DOCKER_REPO}/go-pot:${TPOT_VERSION}
|
||||
build:
|
||||
tags:
|
||||
- ${TPOT_GHCR_REPO}/go-pot:${TPOT_VERSION}
|
||||
context: ../go-pot/
|
||||
<<: *common-build
|
||||
|
||||
# H0neytr4p
|
||||
h0neytr4p:
|
||||
image: ${TPOT_DOCKER_REPO}/h0neytr4p:${TPOT_VERSION}
|
||||
build:
|
||||
tags:
|
||||
- ${TPOT_GHCR_REPO}/h0neytr4p:${TPOT_VERSION}
|
||||
context: ../h0neytr4p/
|
||||
<<: *common-build
|
||||
|
||||
# Hellpot
|
||||
hellpot:
|
||||
image: ${TPOT_DOCKER_REPO}/hellpot:${TPOT_VERSION}
|
||||
build:
|
||||
tags:
|
||||
- ${TPOT_GHCR_REPO}/hellpot:${TPOT_VERSION}
|
||||
context: ../hellpot/
|
||||
<<: *common-build
|
||||
|
||||
# Herlading
|
||||
heralding:
|
||||
image: ${TPOT_DOCKER_REPO}/heralding:${TPOT_VERSION}
|
||||
build:
|
||||
tags:
|
||||
- ${TPOT_GHCR_REPO}/heralding:${TPOT_VERSION}
|
||||
context: ../heralding/
|
||||
<<: *common-build
|
||||
|
||||
# Honeyaml
|
||||
honeyaml:
|
||||
image: ${TPOT_DOCKER_REPO}/honeyaml:${TPOT_VERSION}
|
||||
build:
|
||||
tags:
|
||||
- ${TPOT_GHCR_REPO}/honeyaml:${TPOT_VERSION}
|
||||
context: ../honeyaml/
|
||||
<<: *common-build
|
||||
|
||||
# Honeypots
|
||||
honeypots:
|
||||
image: ${TPOT_DOCKER_REPO}/honeypots:${TPOT_VERSION}
|
||||
build:
|
||||
tags:
|
||||
- ${TPOT_GHCR_REPO}/honeypots:${TPOT_VERSION}
|
||||
context: ../honeypots/
|
||||
<<: *common-build
|
||||
|
||||
# Honeytrap
|
||||
honeytrap:
|
||||
image: ${TPOT_DOCKER_REPO}/honeytrap:${TPOT_VERSION}
|
||||
build:
|
||||
tags:
|
||||
- ${TPOT_GHCR_REPO}/honeytrap:${TPOT_VERSION}
|
||||
context: ../honeytrap/
|
||||
<<: *common-build
|
||||
|
||||
# Ipphoney
|
||||
ipphoney:
|
||||
image: ${TPOT_DOCKER_REPO}/ipphoney:${TPOT_VERSION}
|
||||
build:
|
||||
tags:
|
||||
- ${TPOT_GHCR_REPO}/ipphoney:${TPOT_VERSION}
|
||||
context: ../ipphoney/
|
||||
<<: *common-build
|
||||
|
||||
# Log4pot
|
||||
log4pot:
|
||||
image: ${TPOT_DOCKER_REPO}/log4pot:${TPOT_VERSION}
|
||||
build:
|
||||
tags:
|
||||
- ${TPOT_GHCR_REPO}/log4pot:${TPOT_VERSION}
|
||||
context: ../log4pot/
|
||||
<<: *common-build
|
||||
|
||||
# Mailoney
|
||||
mailoney:
|
||||
image: ${TPOT_DOCKER_REPO}/mailoney:${TPOT_VERSION}
|
||||
build:
|
||||
tags:
|
||||
- ${TPOT_GHCR_REPO}/mailoney:${TPOT_VERSION}
|
||||
context: ../mailoney/
|
||||
<<: *common-build
|
||||
|
||||
# Medpot
|
||||
medpot:
|
||||
image: ${TPOT_DOCKER_REPO}/medpot:${TPOT_VERSION}
|
||||
build:
|
||||
tags:
|
||||
- ${TPOT_GHCR_REPO}/medpot:${TPOT_VERSION}
|
||||
context: ../medpot/
|
||||
<<: *common-build
|
||||
|
||||
# Miniprint
|
||||
miniprint:
|
||||
    image: ${TPOT_DOCKER_REPO}/miniprint:${TPOT_VERSION}
    build:
      tags:
        - ${TPOT_GHCR_REPO}/miniprint:${TPOT_VERSION}
      context: ../miniprint/
      <<: *common-build

  # Redishoneypot
  redishoneypot:
    image: ${TPOT_DOCKER_REPO}/redishoneypot:${TPOT_VERSION}
    build:
      tags:
        - ${TPOT_GHCR_REPO}/redishoneypot:${TPOT_VERSION}
      context: ../redishoneypot/
      <<: *common-build

  # Sentrypeer
  sentrypeer:
    image: ${TPOT_DOCKER_REPO}/sentrypeer:${TPOT_VERSION}
    build:
      tags:
        - ${TPOT_GHCR_REPO}/sentrypeer:${TPOT_VERSION}
      context: ../sentrypeer/
      <<: *common-build

  #### Snare / Tanner
  ## Tanner Redis
  redis:
    image: ${TPOT_DOCKER_REPO}/redis:${TPOT_VERSION}
    build:
      tags:
        - ${TPOT_GHCR_REPO}/redis:${TPOT_VERSION}
      context: ../tanner/redis/
      <<: *common-build

  ## PHP Sandbox
  phpox:
    image: ${TPOT_DOCKER_REPO}/phpox:${TPOT_VERSION}
    build:
      tags:
        - ${TPOT_GHCR_REPO}/phpox:${TPOT_VERSION}
      context: ../tanner/phpox/
      <<: *common-build

  ## Tanner
  tanner:
    image: ${TPOT_DOCKER_REPO}/tanner:${TPOT_VERSION}
    build:
      tags:
        - ${TPOT_GHCR_REPO}/tanner:${TPOT_VERSION}
      context: ../tanner/tanner/
      <<: *common-build

  ## Snare
  snare:
    image: ${TPOT_DOCKER_REPO}/snare:${TPOT_VERSION}
    build:
      tags:
        - ${TPOT_GHCR_REPO}/snare:${TPOT_VERSION}
      context: ../tanner/snare/
      <<: *common-build
  ####

  # Wordpot
  wordpot:
    image: ${TPOT_DOCKER_REPO}/wordpot:${TPOT_VERSION}
    build:
      tags:
        - ${TPOT_GHCR_REPO}/wordpot:${TPOT_VERSION}
      context: ../wordpot/
      <<: *common-build


  ##################
  #### NSM
  ##################

  # Fatt
  fatt:
    image: ${TPOT_DOCKER_REPO}/fatt:${TPOT_VERSION}
    build:
      tags:
        - ${TPOT_GHCR_REPO}/fatt:${TPOT_VERSION}
      context: ../fatt/
      <<: *common-build

  # P0f
  p0f:
    image: ${TPOT_DOCKER_REPO}/p0f:${TPOT_VERSION}
    build:
      tags:
        - ${TPOT_GHCR_REPO}/p0f:${TPOT_VERSION}
      context: ../p0f/
      <<: *common-build

  # Suricata
  suricata:
    image: ${TPOT_DOCKER_REPO}/suricata:${TPOT_VERSION}
    build:
      tags:
        - ${TPOT_GHCR_REPO}/suricata:${TPOT_VERSION}
      context: ../suricata/
      <<: *common-build


  ##################
  #### Tools
  ##################

  # T-Pot Init
  tpotinit:
    image: ${TPOT_DOCKER_REPO}/tpotinit:${TPOT_VERSION}
    build:
      tags:
        - ${TPOT_GHCR_REPO}/tpotinit:${TPOT_VERSION}
      context: ../tpotinit/
      <<: *common-build

  #### ELK
  ## Elasticsearch
  elasticsearch:
    image: ${TPOT_DOCKER_REPO}/elasticsearch:${TPOT_VERSION}
    build:
      tags:
        - ${TPOT_GHCR_REPO}/elasticsearch:${TPOT_VERSION}
      context: ../elk/elasticsearch/
      <<: *common-build

  ## Kibana
  kibana:
    image: ${TPOT_DOCKER_REPO}/kibana:${TPOT_VERSION}
    build:
      tags:
        - ${TPOT_GHCR_REPO}/kibana:${TPOT_VERSION}
      context: ../elk/kibana/
      <<: *common-build

  ## Logstash
  logstash:
    image: ${TPOT_DOCKER_REPO}/logstash:${TPOT_VERSION}
    build:
      tags:
        - ${TPOT_GHCR_REPO}/logstash:${TPOT_VERSION}
      context: ../elk/logstash/
      <<: *common-build

  ## Map Web
  map:
    image: ${TPOT_DOCKER_REPO}/map:${TPOT_VERSION}
    build:
      tags:
        - ${TPOT_GHCR_REPO}/map:${TPOT_VERSION}
      context: ../elk/map/
      <<: *common-build
  ####

  # Ewsposter
  ewsposter:
    image: ${TPOT_DOCKER_REPO}/ewsposter:${TPOT_VERSION}
    build:
      tags:
        - ${TPOT_GHCR_REPO}/ewsposter:${TPOT_VERSION}
      context: ../ewsposter/
      <<: *common-build

  # Nginx
  nginx:
    image: ${TPOT_DOCKER_REPO}/nginx:${TPOT_VERSION}
    build:
      tags:
        - ${TPOT_GHCR_REPO}/nginx:${TPOT_VERSION}
      context: ../nginx/
      <<: *common-build

  # Spiderfoot
  spiderfoot:
    image: ${TPOT_DOCKER_REPO}/spiderfoot:${TPOT_VERSION}
    build:
      tags:
        - ${TPOT_GHCR_REPO}/spiderfoot:${TPOT_VERSION}
      context: ../spiderfoot/
      <<: *common-build
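Every service above merges `<<: *common-build` into its `build:` map. The anchor itself is defined earlier in this compose file and is not part of this excerpt; a minimal sketch of the shape such an anchor takes (the field names are an assumption for illustration, not copied from the diff):

```yaml
# Hypothetical sketch; the real &common-build anchor lives earlier in the file.
x-common-build: &common-build
  dockerfile: ./Dockerfile   # assumed: each context ships its own Dockerfile
  platforms:                 # assumed: the multi-arch targets set up by setup_builder.sh below
    - linux/amd64
    - linux/arm64
```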
docker/_builder/setup_builder.sh (new executable file, 99 additions)
@@ -0,0 +1,99 @@
#!/usr/bin/env bash

# ANSI color codes for blue (info), green (OK) and red (FAIL)
BLUE='\033[0;34m'
GREEN='\033[0;32m'
RED='\033[0;31m'
NC='\033[0m' # No Color

# Check if the user is in the docker group
if ! groups $(whoami) | grep &>/dev/null '\bdocker\b'; then
  echo -e "${RED}You need to be in the docker group to run this script without root privileges.${NC}"
  echo "Please run the following command to add yourself to the docker group:"
  echo "  sudo usermod -aG docker $(whoami)"
  echo "Then log out and log back in or run the script with sudo."
  exit 1
fi

# Command-line switch check
if [ "$1" != "-y" ]; then
  echo "### Setting up Docker for Multi-Arch Builds."
  echo "### Requires Docker packages from https://get.docker.com/"
  echo "### Use on x64 only!"
  echo "### Run with -y if you fit the requirements!"
  exit 0
fi

# Check if the mybuilder exists and is running
echo -n "Checking if buildx builder 'mybuilder' exists and is running..."
if ! docker buildx inspect mybuilder --bootstrap >/dev/null 2>&1; then
  echo
  echo -n "  Creating and starting buildx builder 'mybuilder'..."
  if docker buildx create --name mybuilder --driver docker-container --use >/dev/null 2>&1 && \
     docker buildx inspect mybuilder --bootstrap >/dev/null 2>&1; then
    echo -e " [${GREEN}OK${NC}]"
  else
    echo -e " [${RED}FAIL${NC}]"
    exit 1
  fi
else
  echo -e " [${GREEN}OK${NC}]"
fi

# Ensure QEMU is set up for cross-platform builds
echo -n "Ensuring QEMU is configured for cross-platform builds..."
if docker run --rm --privileged multiarch/qemu-user-static --reset -p yes >/dev/null 2>&1; then
  echo -e " [${GREEN}OK${NC}]"
else
  echo -e " [${RED}FAIL${NC}]"
  exit 1
fi

# Ensure arm64 and amd64 platforms are active
echo -n "Ensuring 'mybuilder' supports linux/arm64 and linux/amd64..."
active_platforms=$(docker buildx inspect mybuilder --bootstrap | grep -oP '(?<=Platforms: ).*')

if [[ "$active_platforms" == *"linux/arm64"* && "$active_platforms" == *"linux/amd64"* ]]; then
  echo -e " [${GREEN}OK${NC}]"
else
  echo
  echo -n "  Enabling platforms linux/arm64 and linux/amd64..."
  if docker buildx create --name mybuilder --driver docker-container --use --platform linux/amd64,linux/arm64 >/dev/null 2>&1 && \
     docker buildx inspect mybuilder --bootstrap >/dev/null 2>&1; then
    echo -e " [${GREEN}OK${NC}]"
  else
    echo -e " [${RED}FAIL${NC}]"
    exit 1
  fi
fi

echo
echo -e "${BLUE}### Done.${NC}"
echo
echo -e "${BLUE}Examples:${NC}"
echo -e "  ${BLUE}Manual multi-arch build:${NC}"
echo "    docker buildx build --platform linux/amd64,linux/arm64 -t username/demo:latest --push ."
echo
echo -e "  ${BLUE}Documentation:${NC} https://docs.docker.com/desktop/multi-arch/"
echo
echo -e "  ${BLUE}Build release with Docker Compose:${NC}"
echo "    docker compose build"
echo
echo -e "  ${BLUE}Build and push release with Docker Compose:${NC}"
echo "    docker compose build --push"
echo
echo -e "  ${BLUE}Build a single image with Docker Compose:${NC}"
echo "    docker compose build tpotinit"
echo
echo -e "  ${BLUE}Build and push a single image with Docker Compose:${NC}"
echo "    docker compose build tpotinit --push"
echo
echo -e "${BLUE}Resolve buildx issues:${NC}"
echo "  docker buildx create --use --name mybuilder"
echo "  docker buildx inspect mybuilder --bootstrap"
echo "  docker login -u <username>"
echo "  docker login ghcr.io -u <username>"
echo
echo -e "${BLUE}Fix segmentation faults when building arm64 images:${NC}"
echo "  docker run --rm --privileged multiarch/qemu-user-static --reset -p yes"
echo
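Taken together, a typical first run of this script on a fresh x64 build host looks roughly like this (a sketch, assuming the tpotce checkout lives in the current directory):

```bash
cd tpotce/docker/_builder
./setup_builder.sh        # without -y: prints the requirements notice and exits
./setup_builder.sh -y     # creates 'mybuilder', registers QEMU, enables both platforms
docker buildx inspect mybuilder --bootstrap | grep Platforms   # verify arm64 + amd64
```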
docker/adbhoney/Dockerfile
@@ -1,37 +1,35 @@
-FROM alpine:latest
+FROM alpine:3.20 AS builder
 #
 # Include dist
-ADD dist/ /root/dist/
+COPY dist/ /root/dist/
 #
 # Install packages
-RUN sed -i 's/dl-cdn/dl-2/g' /etc/apk/repositories && \
-    apk -U add \
-        git \
-        libcap \
-        py3-pip \
-        python3 \
-        python3-dev && \
+RUN apk --no-cache -U upgrade && \
+    apk --no-cache -U add \
+        build-base \
+        git \
+        procps \
+        py3-psutil \
+        py3-requests \
+        py3-pip \
+        python3 && \
+    pip3 install --break-system-packages pyinstaller && \
 #
 # Install adbhoney from git
-    git clone --depth=1 https://github.com/huuck/ADBHoney /opt/adbhoney && \
+    git clone https://github.com/t3chn0m4g3/ADBHoney /opt/adbhoney && \
+    cd /opt/adbhoney && \
+    git checkout 42a73cd8a82ddd4d137de70ac37b1a8b2e3e0119 && \
     cp /root/dist/adbhoney.cfg /opt/adbhoney && \
     sed -i 's/dst_ip/dest_ip/' /opt/adbhoney/adbhoney/core.py && \
     sed -i 's/dst_port/dest_port/' /opt/adbhoney/adbhoney/core.py && \
+    pyinstaller adbhoney.spec
 #
-# Setup user, groups and configs
-    addgroup -g 2000 adbhoney && \
-    adduser -S -H -s /bin/ash -u 2000 -D -g 2000 adbhoney && \
-    chown -R adbhoney:adbhoney /opt/adbhoney && \
-    setcap cap_net_bind_service=+ep /usr/bin/python3.8 && \
-#
-# Clean up
-    apk del --purge git \
-        python3-dev && \
-    rm -rf /root/* && \
-    rm -rf /var/cache/apk/*
+FROM alpine:3.20
+RUN apk --no-cache -U upgrade
+COPY --from=builder /opt/adbhoney/dist/adbhoney/ /opt/adbhoney/
 #
 # Set workdir and start adbhoney
 STOPSIGNAL SIGINT
-USER adbhoney:adbhoney
+USER 2000:2000
 WORKDIR /opt/adbhoney/
-CMD nohup /usr/bin/python3 run.py
+CMD ["./adbhoney"]
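This rewrite is the template for most honeypot images in this diff: a builder stage compiles a PyInstaller bundle from a pinned fork, and a minimal runtime stage copies only the bundle and runs it unprivileged. The generic shape, reduced to its skeleton (`<app>` is a placeholder, not a real path):

```dockerfile
FROM alpine:3.20 AS builder
RUN apk --no-cache -U add build-base git py3-pip python3 && \
    pip3 install --break-system-packages pyinstaller
# ... git clone, git checkout <pinned commit>, pyinstaller <app>.spec ...

FROM alpine:3.20
RUN apk --no-cache -U upgrade
COPY --from=builder /opt/<app>/dist/<app>/ /opt/<app>/
USER 2000:2000
WORKDIR /opt/<app>/
CMD ["./<app>"]
```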
docker/adbhoney/dist/adbhoney.cfg (vendored, 2 additions)
@@ -3,6 +3,8 @@ hostname = honeypot01
 
 address = 0.0.0.0
 port = 5555
+http_download = true
+http_timeout = 45
 
 download_dir = dl/
 log_dir = log/
docker/adbhoney/dist/cpu_check.py (vendored, new file, 42 additions)
@@ -0,0 +1,42 @@
import psutil
import sys
import time

if len(sys.argv) != 3:
    print("Usage: cpu_check.py <PID> <CPU_USAGE_THRESHOLD>")
    sys.exit(1)

try:
    pid = int(sys.argv[1])
except ValueError:
    print("Please provide a valid integer value for the PID.")
    sys.exit(1)

try:
    cpu_threshold = float(sys.argv[2])
except ValueError:
    print("Please provide a valid number for the CPU usage threshold.")
    sys.exit(1)

try:
    target_process = psutil.Process(pid)
except psutil.NoSuchProcess:
    print(f"No process with the PID {pid} was found.")
    sys.exit(1)

# Prepare to calculate the average CPU usage over 3 intervals of 1 second each
cpu_usages = []
for _ in range(3):
    cpu_usages.append(target_process.cpu_percent(interval=1))

# Calculate the average CPU usage
average_cpu_usage = sum(cpu_usages) / len(cpu_usages)
print(f"Average CPU Usage of PID {pid} over 3 seconds: {average_cpu_usage}%")

# Check average CPU usage against the threshold
if average_cpu_usage >= cpu_threshold:
    print(f"Average CPU usage of PID {pid} is above or equal to the threshold of {cpu_threshold}%.")
    sys.exit(1)
else:
    print(f"Average CPU usage of PID {pid} is below the threshold of {cpu_threshold}%. Exiting with code 0.")
    sys.exit(0)
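The helper doubles as a container health probe (see the conpot Dockerfile's HEALTHCHECK later in this diff). Exercised by hand it behaves like this (a sketch, assuming psutil is installed and an adbhoney process is running):

```bash
# exit code 0: average CPU over 3s is below 99%; exit code 1: at or above it
python3 cpu_check.py "$(pgrep -of adbhoney)" 99
echo $?
```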
docker/adbhoney/docker-compose.yml
@@ -1,5 +1,3 @@
-version: '2.3'
-
 networks:
   adbhoney_local:
 
@@ -10,12 +8,14 @@ services:
     build: .
     container_name: adbhoney
     restart: always
+    # cpu_count: 1
+    # cpus: 0.25
     networks:
       - adbhoney_local
     ports:
       - "5555:5555"
-    image: "dtagdevsec/adbhoney:2006"
+    image: "dtagdevsec/adbhoney:24.04"
     read_only: true
     volumes:
-      - /data/adbhoney/log:/opt/adbhoney/log
-      - /data/adbhoney/downloads:/opt/adbhoney/dl
+      - $HOME/tpotce/data/adbhoney/log:/opt/adbhoney/log
+      - $HOME/tpotce/data/adbhoney/downloads:/opt/adbhoney/dl
docker/beelzebub/Dockerfile (new file, 31 additions)
@@ -0,0 +1,31 @@
FROM golang:1.23-alpine AS builder
#
ENV GO111MODULE=on \
    CGO_ENABLED=0 \
    GOOS=linux
#
# Install packages
RUN apk -U add git
#
WORKDIR /root
#
# Build beelzebub
RUN git clone https://github.com/t3chn0m4g3/beelzebub && \
    cd beelzebub && \
    git checkout 0b9aba53ec1671f669d22782758142a1d411b858
WORKDIR /root/beelzebub
RUN go mod download
RUN go build -o main .
RUN sed -i "s#logsPath: ./log#logsPath: ./configurations/log/beelzebub.json#g" /root/beelzebub/configurations/beelzebub.yaml
RUN sed -i 's/passwordRegex: "^(root|qwerty|Smoker666|123456|jenkins|minecraft|sinus|alex|postgres|Ly123456)$"/passwordRegex: ".*"/g' /root/beelzebub/configurations/services/ssh-22.yaml
#
FROM scratch
#
COPY --from=builder /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/
COPY --from=builder /root/beelzebub/main /opt/beelzebub/
COPY --from=builder /root/beelzebub/configurations /opt/beelzebub/configurations
#
# Start beelzebub
WORKDIR /opt/beelzebub
USER 2000:2000
ENTRYPOINT ["./main"]
docker/beelzebub/docker-compose.yml (new file, 29 additions)
@@ -0,0 +1,29 @@
networks:
  beelzebub_local:

services:

  # Beelzebub service
  beelzebub:
    build: .
    container_name: beelzebub
    restart: always
    # cpu_count: 1
    # cpus: 0.25
    networks:
      - beelzebub_local
    ports:
      - "22:22"
      - "80:80"
      - "2222:2222"
      - "3306:3306"
      - "8080:8080"
    environment:
      LLM_MODEL: "ollama"
      LLM_HOST: "http://ollama.local:11434/api/chat"
      OLLAMA_MODEL: "openchat"
    image: "ghcr.io/telekom-security/beelzebub:24.04.1"
    read_only: true
    volumes:
      - $HOME/tpotce/data/beelzebub/key:/opt/beelzebub/configurations/key
      - $HOME/tpotce/data/beelzebub/log:/opt/beelzebub/configurations/log
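`LLM_HOST` expects an Ollama-compatible chat endpoint reachable as `ollama.local`; providing one is left to the operator. A sketch of a companion container, assuming the stock ollama/ollama image and that the hostname resolves from the honeypot's network:

```bash
# Hypothetical companion setup; not part of this diff
docker run -d --name ollama -p 11434:11434 ollama/ollama
docker exec ollama ollama pull openchat   # the model named in OLLAMA_MODEL
```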
docker/ciscoasa/Dockerfile
@@ -1,45 +1,36 @@
-FROM alpine:latest
+FROM alpine:3.20 AS builder
 #
 # Include dist
 ADD dist/ /root/dist/
 #
-# Setup env and apt
-RUN sed -i 's/dl-cdn/dl-2/g' /etc/apk/repositories && \
-    apk -U upgrade && \
-    apk add build-base \
-        git \
-        libffi \
-        libffi-dev \
-        openssl \
-        openssl-dev \
-        py3-pip \
-        python3 \
-        python3-dev && \
-#
-# Setup user
-    addgroup -g 2000 ciscoasa && \
-    adduser -S -s /bin/bash -u 2000 -D -g 2000 ciscoasa && \
+# Install packages
+RUN apk --no-cache -U upgrade && \
+    apk --no-cache -U add \
+        build-base \
+        git \
+        libffi \
+        libffi-dev \
+        openssl \
+        openssl-dev \
+        py3-pip \
+        python3 \
+        python3-dev && \
 #
 # Get and install packages
     mkdir -p /opt/ && \
     cd /opt/ && \
-    git clone --depth=1 https://github.com/cymmetria/ciscoasa_honeypot && \
+    git clone https://github.com/t3chn0m4g3/ciscoasa_honeypot && \
     cd ciscoasa_honeypot && \
-    pip3 install --no-cache-dir -r requirements.txt && \
-    cp /root/dist/asa_server.py /opt/ciscoasa_honeypot && \
-    chown -R ciscoasa:ciscoasa /opt/ciscoasa_honeypot && \
+    git checkout 4bd2795cfa14320a87c00b7159fa3b7d6a8ba254 && \
+    sed -i "s/git+git/git+https/g" requirements.txt && \
+    pip3 install --break-system-packages pyinstaller && \
+    pip3 install --break-system-packages --no-cache-dir -r requirements.txt
+WORKDIR /opt/ciscoasa_honeypot
+RUN pyinstaller asa_server.py --add-data "./asa:./asa"
 #
-# Clean up
-    apk del --purge build-base \
-        git \
-        libffi-dev \
-        openssl-dev \
-        python3-dev && \
-    rm -rf /root/* && \
-    rm -rf /var/cache/apk/*
+FROM alpine:3.20
+RUN apk --no-cache -U upgrade
+COPY --from=builder /opt/ciscoasa_honeypot/dist/ /opt/
 #
 # Start ciscoasa
 STOPSIGNAL SIGINT
-WORKDIR /tmp/ciscoasa/
-USER ciscoasa:ciscoasa
-CMD cp -R /opt/ciscoasa_honeypot/* /tmp/ciscoasa && exec python3 asa_server.py --ike-port 5000 --enable_ssl --port 8443 --verbose >> /var/log/ciscoasa/ciscoasa.log 2>&1
+WORKDIR /opt/asa_server/
+USER 2000:2000
+CMD ./asa_server --ike-port 5000 --enable_ssl --port 8443 --verbose >> /var/log/ciscoasa/ciscoasa.log 2>&1
docker/ciscoasa/dist/asa_server.py (vendored, file removed, 307 deletions)
@@ -1,307 +0,0 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import time
import socket
import logging
logging.basicConfig(format='%(message)s')
import threading
from io import BytesIO
from xml.etree import ElementTree
from http.server import HTTPServer
from socketserver import ThreadingMixIn
from http.server import SimpleHTTPRequestHandler
import ike_server
import datetime


class NonBlockingHTTPServer(ThreadingMixIn, HTTPServer):
    pass

class hpflogger:
    def __init__(self, hpfserver, hpfport, hpfident, hpfsecret, hpfchannel, serverid, verbose):
        self.hpfserver=hpfserver
        self.hpfport=hpfport
        self.hpfident=hpfident
        self.hpfsecret=hpfsecret
        self.hpfchannel=hpfchannel
        self.serverid=serverid
        self.hpc=None
        self.verbose=verbose
        if (self.hpfserver and self.hpfport and self.hpfident and self.hpfport and self.hpfchannel and self.serverid):
            import hpfeeds
            try:
                self.hpc = hpfeeds.new(self.hpfserver, self.hpfport, self.hpfident, self.hpfsecret)
                logger.debug("Logging to hpfeeds using server: {0}, channel {1}.".format(self.hpfserver, self.hpfchannel))
            except (hpfeeds.FeedException, socket.error, hpfeeds.Disconnect):
                logger.critical("hpfeeds connection not successful")

    def log(self, level, message):
        if self.hpc:
            if level in ['debug', 'info'] and not self.verbose:
                return
            self.hpc.publish(self.hpfchannel, "["+self.serverid+"] ["+level+"] ["+datetime.datetime.now().isoformat() +"] " + str(message))


def header_split(h):
    return [list(map(str.strip, l.split(': ', 1))) for l in h.strip().splitlines()]


class WebLogicHandler(SimpleHTTPRequestHandler):
    logger = None
    hpfl = None

    protocol_version = "HTTP/1.1"

    EXPLOIT_STRING = b"host-scan-reply"
    RESPONSE = b"""<?xml version="1.0" encoding="UTF-8"?>
<config-auth client="vpn" type="complete">
    <version who="sg">9.0(1)</version>
    <error id="98" param1="" param2="">VPN Server could not parse request.</error>
</config-auth>"""

    basepath = os.path.dirname(os.path.abspath(__file__))

    alert_function = None

    def setup(self):
        SimpleHTTPRequestHandler.setup(self)
        self.request.settimeout(3)

    def send_header(self, keyword, value):
        if keyword.lower() == 'server':
            return
        SimpleHTTPRequestHandler.send_header(self, keyword, value)

    def send_head(self):
        # send_head will return a file object that do_HEAD/GET will use
        # do_GET/HEAD are already implemented by SimpleHTTPRequestHandler
        filename = os.path.basename(self.path.rstrip('/').split('?', 1)[0])

        if self.path == '/':
            self.send_response(200)
            for k, v in header_split("""
                Content-Type: text/html
                Cache-Control: no-cache
                Pragma: no-cache
                Set-Cookie: tg=; expires=Thu, 01 Jan 1970 22:00:00 GMT; path=/; secure
                Set-Cookie: webvpn=; expires=Thu, 01 Jan 1970 22:00:00 GMT; path=/; secure
                Set-Cookie: webvpnc=; expires=Thu, 01 Jan 1970 22:00:00 GMT; path=/; secure
                Set-Cookie: webvpn_portal=; expires=Thu, 01 Jan 1970 22:00:00 GMT; path=/; secure
                Set-Cookie: webvpnSharePoint=; expires=Thu, 01 Jan 1970 22:00:00 GMT; path=/; secure
                Set-Cookie: webvpnlogin=1; path=/; secure
                Set-Cookie: sdesktop=; expires=Thu, 01 Jan 1970 22:00:00 GMT; path=/; secure
            """):
                self.send_header(k, v)
            self.end_headers()
            return BytesIO(b'<html><script>document.location.replace("/+CSCOE+/logon.html")</script></html>\n')
        elif filename == 'asa':  # don't allow dir listing
            return self.send_file('wrong_url.html', 403)
        else:
            return self.send_file(filename)

    def redirect(self, loc):
        self.send_response(302)
        for k, v in header_split("""
            Content-Type: text/html
            Content-Length: 0
            Cache-Control: no-cache
            Pragma: no-cache
            Location: %s
            Set-Cookie: tg=; expires=Thu, 01 Jan 1970 22:00:00 GMT; path=/; secure
        """ % (loc,)):
            self.send_header(k, v)
        self.end_headers()

    def do_GET(self):
        if self.path == '/+CSCOE+/logon.html':
            self.redirect('/+CSCOE+/logon.html?fcadbadd=1')
            return
        elif self.path.startswith('/+CSCOE+/logon.html?') and 'reason=1' in self.path:
            self.wfile.write(self.send_file('logon_failure').getvalue())
            return
        SimpleHTTPRequestHandler.do_GET(self)

    def do_POST(self):
        data_len = int(self.headers.get('Content-length', 0))
        data = self.rfile.read(data_len) if data_len else b''
        body = self.RESPONSE
        if self.EXPLOIT_STRING in data:
            xml = ElementTree.fromstring(data)
            payloads = []
            for x in xml.iter('host-scan-reply'):
                payloads.append(x.text)

            self.alert_function(self.client_address[0], self.client_address[1], payloads)

        elif self.path == '/':
            self.redirect('/+webvpn+/index.html')
            return
        elif self.path == '/+CSCOE+/logon.html':
            self.redirect('/+CSCOE+/logon.html?fcadbadd=1')
            return
        elif self.path.split('?', 1)[0] == '/+webvpn+/index.html':
            with open(os.path.join(self.basepath, 'asa', "logon_redir.html"), 'rb') as fh:
                body = fh.read()

        self.send_response(200)
        self.send_header('Content-Length', int(len(body)))
        self.send_header('Content-Type', 'text/html; charset=UTF-8')
        self.end_headers()
        self.wfile.write(body)
        return

    def send_file(self, filename, status_code=200, headers=[]):
        try:
            with open(os.path.join(self.basepath, 'asa', filename), 'rb') as fh:
                body = fh.read()
                self.send_response(status_code)
                for k, v in headers:
                    self.send_header(k, v)
                if status_code == 200:
                    for k, v in header_split("""
                        Cache-Control: max-age=0
                        Set-Cookie: webvpn=; expires=Thu, 01 Jan 1970 22:00:00 GMT; path=/; secure
                        Set-Cookie: webvpnc=; expires=Thu, 01 Jan 1970 22:00:00 GMT; path=/; secure
                        Set-Cookie: webvpnlogin=1; secure
                        X-Transcend-Version: 1
                    """):
                        self.send_header(k, v)
                self.send_header('Content-Length', int(len(body)))
                self.send_header('Content-Type', 'text/html')
                self.end_headers()
                return BytesIO(body)
        except IOError:
            return self.send_file('wrong_url.html', 404)

    def log_message(self, format, *args):
        self.logger.debug("{'timestamp': '%s', 'src_ip': '%s', 'payload_printable': '%s'}" %
                          (datetime.datetime.now().isoformat(),
                           self.client_address[0],
                           format % args))
        self.hpfl.log('debug', "%s - - [%s] %s" %
                      (self.client_address[0],
                       self.log_date_time_string(),
                       format % args))

    def handle_one_request(self):
        """Handle a single HTTP request.
        Overriden to not send 501 errors
        """
        self.close_connection = True
        try:
            self.raw_requestline = self.rfile.readline(65537)
            if len(self.raw_requestline) > 65536:
                self.requestline = ''
                self.request_version = ''
                self.command = ''
                self.close_connection = 1
                return
            if not self.raw_requestline:
                self.close_connection = 1
                return
            if not self.parse_request():
                # An error code has been sent, just exit
                return
            mname = 'do_' + self.command
            if not hasattr(self, mname):
                self.log_request()
                self.close_connection = True
                return
            method = getattr(self, mname)
            method()
            self.wfile.flush()  # actually send the response if not already done.
        except socket.timeout as e:
            # a read or a write timed out. Discard this connection
            self.log_error("Request timed out: %r", e)
            self.close_connection = 1
            return


if __name__ == '__main__':
    import click

    logging.basicConfig(level=logging.INFO)
    logger = logging.getLogger()
    logger.info('info')

    @click.command()
    @click.option('-h', '--host', default='0.0.0.0', help='Host to listen')
    @click.option('-p', '--port', default=8443, help='Port to listen', type=click.INT)
    @click.option('-i', '--ike-port', default=5000, help='Port to listen for IKE', type=click.INT)
    @click.option('-s', '--enable_ssl', default=False, help='Enable SSL', is_flag=True)
    @click.option('-c', '--cert', default=None, help='Certificate File Path (will generate self signed '
                                                     'cert if not supplied)')
    @click.option('-v', '--verbose', default=False, help='Verbose logging', is_flag=True)

    # hpfeeds options
    @click.option('--hpfserver', default=os.environ.get('HPFEEDS_SERVER'), help='HPFeeds Server')
    @click.option('--hpfport', default=os.environ.get('HPFEEDS_PORT'), help='HPFeeds Port', type=click.INT)
    @click.option('--hpfident', default=os.environ.get('HPFEEDS_IDENT'), help='HPFeeds Ident')
    @click.option('--hpfsecret', default=os.environ.get('HPFEEDS_SECRET'), help='HPFeeds Secret')
    @click.option('--hpfchannel', default=os.environ.get('HPFEEDS_CHANNEL'), help='HPFeeds Channel')
    @click.option('--serverid', default=os.environ.get('SERVERID'), help='Verbose logging')

    def start(host, port, ike_port, enable_ssl, cert, verbose, hpfserver, hpfport, hpfident, hpfsecret, hpfchannel, serverid):
        """
        A low interaction honeypot for the Cisco ASA component capable of detecting CVE-2018-0101,
        a DoS and remote code execution vulnerability
        """

        hpfl = hpflogger(hpfserver, hpfport, hpfident, hpfsecret, hpfchannel, serverid, verbose)

        def alert(cls, host, port, payloads):
            logger.critical({
                'timestamp': datetime.datetime.utcnow().isoformat(),
                'src_ip': host,
                'src_port': port,
                'payload_printable': payloads,
            })
            # log to hpfeeds
            hpfl.log("critical", {
                'src': host,
                'spt': port,
                'data': payloads,
            })

        if verbose:
            logger.setLevel(logging.DEBUG)

        requestHandler = WebLogicHandler
        requestHandler.alert_function = alert
        requestHandler.logger = logger
        requestHandler.hpfl = hpfl

        def log_date_time_string():
            """Return the current time formatted for logging."""
            now = datetime.datetime.now().isoformat()
            return now

        def ike():
            ike_server.start(host, ike_port, alert, logger, hpfl)
        t = threading.Thread(target=ike)
        t.daemon = True
        t.start()

        httpd = HTTPServer((host, port), requestHandler)
        if enable_ssl:
            import ssl
            if not cert:
                import gencert
                cert = gencert.gencert()
            httpd.socket = ssl.wrap_socket(httpd.socket, certfile=cert, server_side=True)

        logger.info('Starting server on port {:d}/tcp, use <Ctrl-C> to stop'.format(port))
        hpfl.log('info', 'Starting server on port {:d}/tcp, use <Ctrl-C> to stop'.format(port))

        try:
            httpd.serve_forever()
        except KeyboardInterrupt:
            pass
        logger.info('Stopping server.')
        hpfl.log('info', 'Stopping server.')

        httpd.server_close()

    start()
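For reference, the deleted server's CVE-2018-0101 detection keyed on a POST body containing the `host-scan-reply` element (see `EXPLOIT_STRING` and `do_POST` above). A request that would have tripped the alert, with host and port assumed from the defaults:

```bash
curl -sk https://127.0.0.1:8443/ -d '<host-scan-reply>payload</host-scan-reply>'
```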
docker/ciscoasa/docker-compose.yml
@@ -1,4 +1,5 @@
-version: '2.3'
+networks:
+  ciscoasa_local:
 
 services:
 
@@ -9,11 +10,14 @@ services:
     restart: always
     tmpfs:
       - /tmp/ciscoasa:uid=2000,gid=2000
-    network_mode: "host"
     # cpu_count: 1
     # cpus: 0.25
+    networks:
+      - ciscoasa_local
+    ports:
+      - "5000:5000/udp"
+      - "8443:8443"
-    image: "dtagdevsec/ciscoasa:2006"
+    image: "dtagdevsec/ciscoasa:24.04"
     read_only: true
     volumes:
-      - /data/ciscoasa/log:/var/log/ciscoasa
+      - $HOME/tpotce/data/ciscoasa/log:/var/log/ciscoasa
docker/citrixhoneypot/Dockerfile
@@ -1,23 +1,21 @@
-FROM alpine:latest
+FROM alpine:3.20 AS builder
 #
 # Install packages
-RUN sed -i 's/dl-cdn/dl-2/g' /etc/apk/repositories && \
-    apk -U add \
-        git \
-        libcap \
-        openssl \
-        py3-pip \
-        python3 \
-        python3-dev && \
-#
-    pip3 install --no-cache-dir python-json-logger && \
+RUN apk --no-cache -U upgrade && \
+    apk --no-cache -U add \
+        build-base \
+        git \
+        openssl \
+        py3-pip \
+        python3 && \
+    pip3 install --break-system-packages --no-cache-dir \
+        pyinstaller \
+        python-json-logger
 #
 # Install CitrixHoneypot from GitHub
-# git clone --depth=1 https://github.com/malwaretech/citrixhoneypot /opt/citrixhoneypot && \
-# git clone --depth=1 https://github.com/vorband/CitrixHoneypot /opt/citrixhoneypot && \
-    git clone --depth=1 https://github.com/t3chn0m4g3/CitrixHoneypot /opt/citrixhoneypot && \
-#
-# Setup user, groups and configs
+RUN git clone https://github.com/t3chn0m4g3/CitrixHoneypot /opt/citrixhoneypot && \
+    cd /opt/citrixhoneypot && \
+    git checkout dee32447033a0296d053e8f881bf190f9dd7ad44 && \
     mkdir -p /opt/citrixhoneypot/logs /opt/citrixhoneypot/ssl && \
     openssl req \
         -nodes \
@@ -27,20 +25,19 @@ RUN sed -i 's/dl-cdn/dl-2/g' /etc/apk/repositories && \
         -out "/opt/citrixhoneypot/ssl/cert.pem" \
         -days 365 \
         -subj '/C=AU/ST=Some-State/O=Internet Widgits Pty Ltd' && \
-    addgroup -g 2000 citrixhoneypot && \
-    adduser -S -H -s /bin/ash -u 2000 -D -g 2000 citrixhoneypot && \
-    chown -R citrixhoneypot:citrixhoneypot /opt/citrixhoneypot && \
-    setcap cap_net_bind_service=+ep /usr/bin/python3.8 && \
+    chown 2000:2000 -R ssl/
 #
-# Clean up
-    apk del --purge git \
-        openssl \
-        python3-dev && \
-    rm -rf /root/* && \
-    rm -rf /var/cache/apk/*
+WORKDIR /opt/citrixhoneypot
+RUN pyinstaller CitrixHoneypot.py
+#
+FROM alpine:3.20
+RUN apk --no-cache -U upgrade
+COPY --from=builder /opt/citrixhoneypot/dist/CitrixHoneypot/ /opt/citrixhoneypot
+COPY --from=builder /opt/citrixhoneypot/ssl /opt/citrixhoneypot/ssl
+COPY --from=builder /opt/citrixhoneypot/responses/ /opt/citrixhoneypot/responses
 #
 # Set workdir and start citrixhoneypot
 STOPSIGNAL SIGINT
-USER citrixhoneypot:citrixhoneypot
+USER 2000:2000
 WORKDIR /opt/citrixhoneypot/
-CMD nohup /usr/bin/python3 CitrixHoneypot.py
+CMD nohup ./CitrixHoneypot
docker/citrixhoneypot/docker-compose.yml
@@ -1,5 +1,3 @@
-version: '2.3'
-
 networks:
   citrixhoneypot_local:
 
@@ -10,11 +8,13 @@ services:
     build: .
     container_name: citrixhoneypot
    restart: always
+    # cpu_count: 1
+    # cpus: 0.25
     networks:
       - citrixhoneypot_local
     ports:
       - "443:443"
-    image: "dtagdevsec/citrixhoneypot:2006"
+    image: "dtagdevsec/citrixhoneypot:24.04"
     read_only: true
     volumes:
-      - /data/citrixhoneypot/logs:/opt/citrixhoneypot/logs
+      - $HOME/tpotce/data/citrixhoneypot/log:/opt/citrixhoneypot/logs
docker/conpot/Dockerfile
@@ -1,35 +1,57 @@
-FROM alpine:latest
+FROM alpine:3.19
 #
 # Include dist
-ADD dist/ /root/dist/
+COPY dist/ /root/dist/
 #
-# Setup apt
-RUN sed -i 's/dl-cdn/dl-2/g' /etc/apk/repositories && \
-    apk -U add \
-        build-base \
-        file \
-        git \
-        libev \
-        libtool \
-        libcap \
-        libffi-dev \
-        libxslt \
-        libxslt-dev \
-        mariadb-dev \
-        pkgconfig \
-        py3-pip \
-        python3 \
-        python3-dev \
-        py-cffi \
-        py-cryptography \
-        tcpdump \
-        wget && \
+# Install packages
+RUN apk --no-cache -U upgrade && \
+    apk --no-cache -U add \
+        build-base \
+        cython \
+        file \
+        git \
+        libev \
+        libtool \
+        libcap \
+        libffi-dev \
+        libxslt \
+        libxslt-dev \
+        mariadb-dev \
+        pkgconfig \
+        procps \
+        python3 \
+        python3-dev \
+        py3-cffi \
+        py3-cryptography \
+        py3-freezegun \
+        py3-gevent \
+        py3-lxml \
+        py3-natsort \
+        py3-pip \
+        py3-ply \
+        py3-psutil \
+        py3-pycryptodomex \
+        py3-pytest \
+        py3-requests \
+        py3-pyserial \
+        py3-setuptools \
+        py3-slugify \
+        py3-snmp \
+        py3-sphinx \
+        py3-wheel \
+        py3-zope-event \
+        py3-zope-interface \
+        wget && \
 #
 # Setup ConPot
-    git clone --depth=1 https://github.com/mushorg/conpot /opt/conpot && \
+    git clone https://github.com/t3chn0m4g3/cpppo /opt/cpppo && \
+    cd /opt/cpppo && \
+    git checkout 350d5187a941e7359c53087dcb1f0e41ece5682c && \
+    pip3 install --break-system-packages --no-cache-dir --upgrade pip && \
+    pip3 install --break-system-packages --no-cache-dir . && \
+    git clone https://github.com/mushorg/conpot /opt/conpot && \
     cd /opt/conpot/ && \
     # Patch to accept ENV for MIB path
     sed -i "s/tmp_mib_dir = tempfile.mkdtemp()/tmp_mib_dir = tempfile.mkdtemp(dir=os.environ['CONPOT_TMP'])/" /opt/conpot/conpot/protocols/snmp/snmp_server.py && \
+    git checkout 26c67d11b08a855a28e87abd186d959741f46c7f && \
     # Change template default ports if <1024
     sed -i 's/port="2121"/port="21"/' /opt/conpot/conpot/templates/default/ftp/ftp.xml && \
     sed -i 's/port="8800"/port="80"/' /opt/conpot/conpot/templates/default/http/http.xml && \
@@ -40,24 +62,24 @@ RUN sed -i 's/dl-cdn/dl-2/g' /etc/apk/repositories && \
     sed -i 's/port="6969"/port="69"/' /opt/conpot/conpot/templates/default/tftp/tftp.xml && \
     sed -i 's/port="16100"/port="161"/' /opt/conpot/conpot/templates/IEC104/snmp/snmp.xml && \
     sed -i 's/port="6230"/port="623"/' /opt/conpot/conpot/templates/ipmi/ipmi/ipmi.xml && \
-    pip3 install --no-cache-dir -U setuptools && \
-    pip3 install --no-cache-dir . && \
+    cp /root/dist/requirements.txt . && \
+    pip3 install --break-system-packages --no-cache-dir . && \
     cd / && \
     rm -rf /opt/conpot /tmp/* /var/tmp/* && \
-    setcap cap_net_bind_service=+ep /usr/bin/python3.8 && \
+    setcap cap_net_bind_service=+ep $(readlink -f $(type -P python3)) && \
 #
 # Get wireshark manuf db for scapy, setup configs, user, groups
     mkdir -p /etc/conpot /var/log/conpot /usr/share/wireshark && \
-    wget https://github.com/wireshark/wireshark/raw/master/manuf -o /usr/share/wireshark/manuf && \
+    wget https://www.wireshark.org/download/automated/data/manuf -o /usr/share/wireshark/manuf && \
     cp /root/dist/conpot.cfg /etc/conpot/conpot.cfg && \
-    cp -R /root/dist/templates /usr/lib/python3.8/site-packages/conpot/ && \
+    cp -R /root/dist/templates /usr/lib/$(readlink -f $(type -P python3) | cut -f4 -d"/")/site-packages/conpot/ && \
+    cp /root/dist/cpu_check.py / && \
     addgroup -g 2000 conpot && \
     adduser -S -s /bin/ash -u 2000 -D -g 2000 conpot && \
 #
 # Clean up
     apk del --purge \
         build-base \
+        cython-dev \
         file \
         git \
         libev \
@@ -65,14 +87,18 @@ RUN sed -i 's/dl-cdn/dl-2/g' /etc/apk/repositories && \
         libxslt-dev \
         mariadb-dev \
         pkgconfig \
-        py3-pip \
         python3-dev \
-        py-cffi \
         wget && \
-    rm -rf /root/* && \
-    rm -rf /tmp/* && \
-    rm -rf /var/cache/apk/*
+    rm -rf /root/* \
+           /tmp/* \
+           /var/cache/apk/* \
+           /opt/cpppo/.git \
+           /opt/conpot/.git
 #
 # Start conpot
 STOPSIGNAL SIGINT
+# Conpot sometimes hangs at 100% CPU usage, if detected container will become unhealthy and restarted by tpotinit
+HEALTHCHECK --interval=5m --timeout=30s --retries=3 CMD python3 /cpu_check.py $(pgrep -of conpot) 99
 USER conpot:conpot
-CMD exec /usr/bin/conpot --temp_dir $CONPOT_TMP --template $CONPOT_TEMPLATE --logfile $CONPOT_LOG --config $CONPOT_CONFIG
+CMD exec /usr/bin/conpot --mibcache $CONPOT_TMP --temp_dir $CONPOT_TMP --template $CONPOT_TEMPLATE --logfile $CONPOT_LOG --config $CONPOT_CONFIG
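The HEALTHCHECK feeds Docker's health state, which tpotinit acts on. To watch that state from the host, one option (assuming the container is named conpot):

```bash
docker inspect --format '{{.State.Health.Status}}' conpot   # healthy | unhealthy | starting
```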
docker/conpot/dist/command_responder.py (vendored, 1123 changes; large diff collapsed)

docker/conpot/dist/conpot.cfg (vendored, 2 changes)
@@ -3,7 +3,7 @@ sensorid = conpot
 
 [virtual_file_system]
 data_fs_url = %(CONPOT_TMP)s
-fs_url = tar:///usr/lib/python3.8/site-packages/conpot/data.tar
+fs_url = tar:///usr/lib/python3.11/site-packages/conpot/data.tar
 
 [session]
 timeout = 30
docker/conpot/dist/cpu_check.py (vendored, new file, 42 additions; identical to the adbhoney copy above)
@@ -0,0 +1,42 @@
import psutil
import sys
import time

if len(sys.argv) != 3:
    print("Usage: cpu_check.py <PID> <CPU_USAGE_THRESHOLD>")
    sys.exit(1)

try:
    pid = int(sys.argv[1])
except ValueError:
    print("Please provide a valid integer value for the PID.")
    sys.exit(1)

try:
    cpu_threshold = float(sys.argv[2])
except ValueError:
    print("Please provide a valid number for the CPU usage threshold.")
    sys.exit(1)

try:
    target_process = psutil.Process(pid)
except psutil.NoSuchProcess:
    print(f"No process with the PID {pid} was found.")
    sys.exit(1)

# Prepare to calculate the average CPU usage over 3 intervals of 1 second each
cpu_usages = []
for _ in range(3):
    cpu_usages.append(target_process.cpu_percent(interval=1))

# Calculate the average CPU usage
average_cpu_usage = sum(cpu_usages) / len(cpu_usages)
print(f"Average CPU Usage of PID {pid} over 3 seconds: {average_cpu_usage}%")

# Check average CPU usage against the threshold
if average_cpu_usage >= cpu_threshold:
    print(f"Average CPU usage of PID {pid} is above or equal to the threshold of {cpu_threshold}%.")
    sys.exit(1)
else:
    print(f"Average CPU usage of PID {pid} is below the threshold of {cpu_threshold}%. Exiting with code 0.")
    sys.exit(0)
docker/conpot/dist/requirements.txt (vendored, new file, 18 additions)
@@ -0,0 +1,18 @@
pysnmp-mibs
pysmi==0.3.4
libtaxii>=1.1.0
crc16
scapy==2.4.5
hpfeeds3
modbus-tk
stix-validator
stix
cybox
bacpypes==0.17.0
pyghmi==1.4.1
mixbox
modbus-tk
fs==2.3.0
tftpy
# some freezegun versions broken
sphinx_rtd_theme
docker/conpot/dist/templates/IEC104/template.xml (vendored, 22 changes)
@@ -70,7 +70,7 @@
         <value type="value">100000000</value>
       </key>
       <key name="ifPhysAddress">
-        <value type="value">"\x00\x0e\x8c\x29\xc5\x1a"</value>
+        <value type="value">"0x000e8c29c51a"</value>
       </key>
       <key name="ifAdminStatus">
         <value type="value">1</value>
@@ -91,19 +91,19 @@
         <value type="value">1</value>
       </key>
       <key name="ifInOctets">
-        <value type="value">1618895</value>
+        <value type="function">conpot.emulators.misc.sysinfo.BytesRecv</value>
       </key>
       <key name="ifInUcastPkts">
-        <value type="value">7018</value>
+        <value type="function">conpot.emulators.misc.sysinfo.PacketsRecv</value>
       </key>
       <key name="ifInNUcastPkts">
         <value type="value">291</value>
       </key>
       <key name="ifOutOctets">
-        <value type="value">455107</value>
+        <value type="function">conpot.emulators.misc.sysinfo.BytesSent</value>
       </key>
       <key name="ifOutUcastPkts">
-        <value type="value">872264</value>
+        <value type="function">conpot.emulators.misc.sysinfo.PacketsSent</value>
       </key>
       <key name="ifOutUNcastPkts">
         <value type="value">143</value>
@@ -168,7 +168,7 @@
         <value type="value">0</value>
       </key>
       <key name="ipAdEntAddr">
-        <value type="value">"217.172.190.137"</value>
+        <value type="function">conpot.emulators.misc.sysinfo.LocalIP</value>
       </key>
       <key name="ipAdEntIfIndex">
         <value type="value">1</value>
@@ -290,7 +290,7 @@
         <value type="value">45</value>
       </key>
       <key name="tcpCurrEstab">
-        <value type="value">0</value>
+        <value type="function">conpot.emulators.misc.sysinfo.TcpCurrEstab</value>
       </key>
       <key name="tcpInSegs">
         <value type="value">30321</value>
@@ -305,7 +305,7 @@
         <value type="value">2</value>
       </key>
       <key name="tcpConnLocalAddress">
-        <value type="value">"217.172.190.137"</value>
+        <value type="function">conpot.emulators.misc.sysinfo.LocalIP</value>
       </key>
       <key name="tcpConnLocalPort">
         <value type="value">2404</value>
@@ -336,7 +336,7 @@
         <value type="value">47</value>
       </key>
       <key name="udpLocalAddress">
-        <value type="value">"217.172.190.137"</value>
+        <value type="value">"163.172.189.137"</value>
       </key>
       <key name="udpLocalPort">
         <value type="value">161</value>
@@ -347,6 +347,10 @@
 
 
       <!-- IEC104 Protocol parameter -->
+      <!-- Common (Object) Address, aka COA, Station Address -->
+      <key name="CommonAddress">
+        <value type="value">"0x1e28"</value>
+      </key>
       <!-- Timeout of connection establishment -->
       <key name="T_0">
         <value type="value">30</value>