Work in progress!

This is the foundation for the distributed T-Pot feature.
It is highly work in progress, only works with local Docker image builds,
and will become available for production with the upcoming T-Pot 22xx release.
t3chn0m4g3 2022-01-03 18:24:17 +00:00
parent ef1a1fa057
commit 68b080a3a8
11 changed files with 846 additions and 32 deletions


@@ -2,6 +2,7 @@
# Let's add the first local ip to the /etc/issue and external ip to ews.ip file
# If the external IP cannot be detected, the internal IP will be inherited.
source /etc/environment
source /data/elk/logstash/ls_environment
myUUID=$(lsblk -o MOUNTPOINT,UUID | grep "/" | awk '{ print $2 }')
myLOCALIP=$(hostname -I | awk '{ print $1 }')
myEXTIP=$(/opt/tpot/bin/myip.sh)
@@ -32,5 +33,17 @@ MY_EXTIP=$myEXTIP
MY_INTIP=$myLOCALIP
MY_HOSTNAME=$HOSTNAME
EOF
if [ -s "/data/elk/logstash/ls_environment" ];
then
source /data/elk/logstash/ls_environment
tee -a /opt/tpot/etc/compose/elk_environment << EOF
MY_TPOT_TYPE=$MY_TPOT_TYPE
MY_POT_PRIVATEKEYFILE=$MY_POT_PRIVATEKEYFILE
MY_HIVE_USERNAME=$MY_HIVE_USERNAME
MY_HIVE_IP=$MY_HIVE_IP
EOF
fi
chown tpot:tpot /data/ews/conf/ews.ip
chmod 770 /data/ews/conf/ews.ip
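With the conditional block above, a distributed pot ends up with the Hive variables appended to the compose environment file. A minimal sketch of the resulting /opt/tpot/etc/compose/elk_environment (all values hypothetical):

MY_EXTIP=203.0.113.10
MY_INTIP=192.168.1.10
MY_HOSTNAME=pot01
MY_TPOT_TYPE=POT
MY_POT_PRIVATEKEYFILE=/data/elk/logstash/pot01
MY_HIVE_USERNAME=pot01
MY_HIVE_IP=198.51.100.5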


@@ -47,6 +47,7 @@ RUN apk -U --no-cache add \
cp http_input.conf /etc/logstash/conf.d/ && \
cp http_output.conf /etc/logstash/conf.d/ && \
cp pipelines.yml /usr/share/logstash/config/pipelines.yml && \
cp pipelines_pot.yml /usr/share/logstash/config/pipelines_pot.yml && \
cp tpot_es_template.json /etc/logstash/ && \
#
# Setup user, groups and configs
@@ -67,4 +68,5 @@ HEALTHCHECK --retries=10 CMD curl -s -XGET 'http://127.0.0.1:9600'
# Start logstash
#USER logstash:logstash
#CMD update.sh && exec /usr/share/logstash/bin/logstash -f /etc/logstash/conf.d/logstash.conf --config.reload.automatic --java-execution --log.level debug
-CMD update.sh && exec /usr/share/logstash/bin/logstash -f /etc/logstash/conf.d/http_output.conf --config.reload.automatic --java-execution
+#CMD update.sh && exec /usr/share/logstash/bin/logstash -f /etc/logstash/conf.d/http_output.conf --config.reload.automatic --java-execution
+CMD update.sh && exec /usr/share/logstash/bin/logstash --config.reload.automatic --java-execution
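Dropping the -f flag makes Logstash read its pipeline definitions from pipelines.yml, so one image can serve both roles. A sketch of what the default pipelines.yml could look like (hypothetical contents; only pipelines_pot.yml is added by this commit, and on a POT the startup script copies it over pipelines.yml):

- pipeline.id: logstash
  path.config: "/etc/logstash/conf.d/logstash.conf"
- pipeline.id: http_input
  path.config: "/etc/logstash/conf.d/http_input.conf"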


@@ -16,6 +16,11 @@ fi
}
function fuDEPLOY_POT () {
echo
echo "###############################"
echo "# Deploying to T-Pot Hive ... #"
echo "###############################"
echo
sshpass -e ssh -4 -t -T -l "$MY_TPOT_USERNAME" -p 64295 "$MY_HIVE_IP" << EOF
echo "$SSHPASS" | sudo -S bash -c 'useradd -m -s /sbin/nologin -G tpotlogs "$MY_HIVE_USERNAME";
mkdir -p /home/"$MY_HIVE_USERNAME"/.ssh;
@@ -24,27 +29,65 @@ chmod 600 /home/"$MY_HIVE_USERNAME"/.ssh/authorized_keys;
chmod 755 /home/"$MY_HIVE_USERNAME"/.ssh;
chown "$MY_HIVE_USERNAME":"$MY_HIVE_USERNAME" -R /home/"$MY_HIVE_USERNAME"/.ssh'
EOF
-exit
+echo
+echo "###########################"
+echo "# Done. Please reboot ... #"
+echo "###########################"
+echo
+exit 0
}
# Check Hive availability
function fuCHECK_HIVE () {
echo
echo "############################################"
echo "# Checking for T-Pot Hive availability ... #"
echo "############################################"
echo
sshpass -e ssh -4 -t -l "$MY_TPOT_USERNAME" -p 64295 -f -N -L64305:127.0.0.1:64305 "$MY_HIVE_IP"
if [ $? -eq 0 ];
then
-echo ssh success
+echo
+echo "#########################"
+echo "# T-Pot Hive available! #"
+echo "#########################"
+echo
myHIVE_OK=$(curl -s http://127.0.0.1:64305)
if [ "$myHIVE_OK" == "ok" ];
then
-echo ssh tunnel success
+echo
+echo "##############################"
+echo "# T-Pot Hive tunnel test OK! #"
+echo "##############################"
+echo
kill -9 $(pidof ssh)
else
-echo tunneled port 64305 on Hive unreachable
-echo aborting
+echo
+echo "######################################################"
+echo "# T-Pot Hive tunnel test FAILED!                     #"
+echo "# Tunneled port tcp/64305 unreachable on T-Pot Hive. #"
+echo "# Aborting.                                          #"
+echo "######################################################"
+echo
kill -9 $(pidof ssh)
rm $MY_POT_PUBLICKEYFILE
rm $MY_POT_PRIVATEKEYFILE
rm $MY_LS_ENVCONFIGFILE
exit 1
fi;
else
-echo ssh on Hive unreachable
+echo
+echo "#################################################################"
+echo "# Something went wrong, most likely T-Pot Hive was unreachable! #"
+echo "# Aborting.                                                     #"
+echo "#################################################################"
+echo
rm $MY_POT_PUBLICKEYFILE
rm $MY_POT_PRIVATEKEYFILE
rm $MY_LS_ENVCONFIGFILE
exit 1
fi;
}
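For debugging, the same check can be run by hand from a pot (values hypothetical): first open the forward of local tcp/64305 to the Hive through its ssh daemon on tcp/64295, then query the tunneled endpoint, which is expected to answer "ok":

ssh -4 -f -N -l pot01 -p 64295 -L64305:127.0.0.1:64305 198.51.100.5
curl -s http://127.0.0.1:64305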
@@ -63,41 +106,63 @@ export SSHPASS
read -p "IP / FQDN: " MY_HIVE_IP
MY_HIVE_USERNAME="$(hostname)"
MY_TPOT_TYPE="POT"
MY_LS_ENVCONFIGFILE="/data/elk/logstash/ls_environment"
echo "$MY_TPOT_USERNAME"
echo "$MY_HIVE_USERNAME"
echo "$SSHPASS"
echo "$MY_HIVE_IP"
echo "$MY_TPOT_TYPE"
#echo "$SSHPASS"
MY_POT_PUBLICKEYFILE="/data/elk/logstash/$MY_HIVE_USERNAME.pub"
MY_POT_PRIVATEKEYFILE="/data/elk/logstash/$MY_HIVE_USERNAME"
if ! [ -s "$MY_POT_PRIVATEKEYFILE" ] && ! [ -s "$MY_POT_PUBLICKEYFILE" ];
then
echo "we need to gen a keyfile"
echo
echo "##############################"
echo "# Generating ssh keyfile ... #"
echo "##############################"
echo
mkdir -p /data/elk/logstash
ssh-keygen -f "$MY_POT_PRIVATEKEYFILE" -N "" -C "$MY_HIVE_USERNAME"
MY_POT_PUBLICKEY="$(cat "$MY_POT_PUBLICKEYFILE")"
echo "$MY_POT_PUBLICKEY"
else
echo "there is a keyfile already, exiting"
exit
echo
echo "#############################################"
echo "# There is already a ssh keyfile. Aborting. #"
echo "#############################################"
echo
exit 1
fi
echo
echo "###########################################################"
echo "# Writing config to /data/elk/logstash/ls_environment. #"
echo "# If you make changes to this file, you need to reboot or #"
echo "# run /opt/tpot/bin/updateip.sh. #"
echo "###########################################################"
echo
tee $MY_LS_ENVCONFIGFILE << EOF
MY_TPOT_TYPE="$MY_TPOT_TYPE"
MY_POT_PRIVATEKEYFILE="$MY_POT_PRIVATEKEYFILE"
MY_HIVE_USERNAME="$MY_HIVE_USERNAME"
MY_HIVE_IP="$MY_HIVE_IP"
EOF
}
# Deploy Pot to Hive
fuGOT_ROOT
echo
echo "-----------------------------"
echo "Ship T-Pot Logs to T-Pot Hive"
echo "-----------------------------"
echo "Executing this script will ship all logs to a T-Pot Hive installation."
echo "#################################"
echo "# Ship T-Pot Logs to T-Pot Hive #"
echo "#################################"
echo
echo "If ..."
echo "1. You already have a T-Pot Hive installation running and"
echo "2. This T-Pot installation is running the type \"Pot\""
echo "... the script will guide you to deploy this T-Pot's logs to the Hive."
echo
echo
echo "------------------------------------"
echo "Please provide data from your T-Pot "
echo "------------------------------------"
echo "###################################"
echo "# Deploy T-Pot Logs to T-Pot Hive #"
echo "###################################"
echo
echo "[c] - Continue deplyoment"
#echo "[0] - Rollback"
echo "[q] - Abort and exit"
echo
while [ 1 != 2 ]
@@ -111,13 +176,9 @@ while [ 1 != 2 ]
fuDEPLOY_POT
break
;;
-# [0])
-# fuOPTOUT
-# break
-# ;;
[q,Q])
echo "Aborted."
-exit
+exit 0
;;
esac
done
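In short, a hypothetical session on a pot (the script's install path is assumed here) looks like this:

sudo /opt/tpot/bin/deploy.sh
# prompts for the Hive's user, password (SSHPASS) and IP / FQDN,
# generates /data/elk/logstash/<hostname> and <hostname>.pub,
# verifies the tunnel on tcp/64305, creates the restricted log user on the Hive,
# writes /data/elk/logstash/ls_environment and asks for a reboot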


@@ -147,6 +147,13 @@ input {
type => "Ipphoney"
}
# Log4pot
file {
path => ["/data/log4pot/log/log4pot.log"]
codec => json
type => "Log4pot"
}
# Mailoney
file {
path => ["/data/mailoney/log/commands.log"]
@@ -564,6 +571,20 @@ filter {
}
}
# Log4pot
if [type] == "Log4pot" {
date {
match => [ "timestamp", "ISO8601" ]
}
mutate {
rename => {
"server_port" => "dest_port"
"port" => "src_port"
"client" => "src_ip"
}
}
}
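# Illustration only, not part of the commit: a hypothetical Log4pot event such as
# {"timestamp":"2022-01-03T18:24:17Z","server_port":8080,"port":51234,"client":"203.0.113.7"}
# leaves this filter with dest_port=8080, src_port=51234 and src_ip=203.0.113.7,
# i.e. the field names the shared geoip section below expects.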
# Mailoney
if [type] == "Mailoney" {
date {
@@ -649,12 +670,12 @@ if "_jsonparsefailure" in [tags] { drop {} }
geoip {
cache_size => 10000
source => "src_ip"
-database => "/usr/share/logstash/vendor/bundle/jruby/2.5.0/gems/logstash-filter-geoip-7.2.3-java/vendor/GeoLite2-City.mmdb"
+database => "/usr/share/logstash/vendor/bundle/jruby/2.5.0/gems/logstash-filter-geoip-7.2.8-java/vendor/GeoLite2-City.mmdb"
}
geoip {
cache_size => 10000
source => "src_ip"
-database => "/usr/share/logstash/vendor/bundle/jruby/2.5.0/gems/logstash-filter-geoip-7.2.3-java/vendor/GeoLite2-ASN.mmdb"
+database => "/usr/share/logstash/vendor/bundle/jruby/2.5.0/gems/logstash-filter-geoip-7.2.8-java/vendor/GeoLite2-ASN.mmdb"
}
translate {
refresh_interval => 86400


@@ -730,7 +730,7 @@ output {
# With templates now being legacy and ILM in place we need to set the daily index with its template manually. Otherwise a new index might be created with different settings configured through Kibana.
index => "logstash-%{+YYYY.MM.dd}"
template => "/etc/logstash/tpot_es_template.json"
-# document_type => "doc"
+#document_type => "doc"
}
#if [type] == "Suricata" {

docker/elk/logstash/dist/logstash.sh (vendored, new file)

@@ -0,0 +1,55 @@
#!/bin/bash
# Let's ensure normal operation on exit or if interrupted ...
function fuCLEANUP {
exit 0
}
trap fuCLEANUP EXIT
# Check internet availability
function fuCHECKINET () {
mySITES=$1
error=0
for i in $mySITES;
do
curl --connect-timeout 5 -Is $i > /dev/null 2>&1
if [ $? -ne 0 ];
then
let error+=1
fi;
done;
echo $error
}
# Check for connectivity and download latest translation maps
myCHECK=$(fuCHECKINET "listbot.sicherheitstacho.eu")
if [ "$myCHECK" == "0" ];
then
echo "Connection to Listbot looks good, now downloading latest translation maps."
cd /etc/listbot
aria2c -s16 -x 16 https://listbot.sicherheitstacho.eu/cve.yaml.bz2 && \
aria2c -s16 -x 16 https://listbot.sicherheitstacho.eu/iprep.yaml.bz2 && \
bunzip2 -f *.bz2
cd /
else
echo "Cannot reach Listbot, starting Logstash without latest translation maps."
fi
exit
# notes
MY_TPOT_TYPE standard = SINGLE, distributed = POT
If POT:
autossh -f -M 0 -4 -l tpot01 -i /data/elk/logstash/tpot01 -p 64295 -N -L64305:127.0.0.1:64305 172.20.254.194 -o "ServerAliveInterval 30" -o "ServerAliveCountMax 3" -o "StrictHostKeyChecking=no" -o "UserKnownHostsFile=/dev/null"
exec /usr/share/logstash/bin/logstash -f /etc/logstash/conf.d/http_output.conf --config.reload.automatic --java-execution
If SINGLE:
exec /usr/share/logstash/bin/logstash --config.reload.automatic --java-execution
Fetch the environment variables from /data/elk/logstash,
best via the ELK environment file so that there are no problems


@@ -0,0 +1,2 @@
- pipeline.id: http_output
  path.config: "/etc/logstash/conf.d/http_output.conf"
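With only this one pipeline defined, a POT-type installation ships logs exclusively through http_output. Which pipelines are actually running can be checked against Logstash's monitoring API once the container is up, e.g.:

curl -s http://127.0.0.1:9600/_node/pipelines
# expected to list only the http_output pipeline on a POT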


@@ -35,6 +35,22 @@ if [ "$myCHECK" == "0" ];
echo "Cannot reach Listbot, starting Logstash without latest translation maps."
fi
# Distributed T-Pot installation needs a different pipeline config and autossh tunnel.
if [ "$MY_TPOT_TYPE" == "POT" ];
then
echo
echo "Distributed T-Pot setup, sending T-Pot logs to $MY_HIVE_IP."
echo
echo "T-Pot type: $MY_TPOT_TYPE"
echo "Keyfile used: $MY_POT_PRIVATEKEYFILE"
echo "Hive username: $MY_HIVE_USERNAME"
echo "Hive IP: $MY_HIVE_IP"
echo
cp /usr/share/logstash/config/pipelines_pot.yml /usr/share/logstash/config/pipelines.yml
autossh -f -M 0 -v -4 -l $MY_HIVE_USERNAME -i $MY_POT_PRIVATEKEYFILE -p 64295 -N -L64305:127.0.0.1:64305 $MY_HIVE_IP -o "ServerAliveInterval 30" -o "ServerAliveCountMax 3" -o "StrictHostKeyChecking=no" -o "UserKnownHostsFile=/dev/null"
exit 0
fi
# We do want to enforce our es_template thus we always need to delete the default template, putting our default afterwards
# This is now done via common_configs.rb => overwrite default logstash template
echo "Removing logstash template."


@@ -0,0 +1,132 @@
# T-Pot (Hive)
# Do not erase ports sections, these are used by /opt/tpot/bin/rules.sh to setup iptables ACCEPT rules for NFQ (honeytrap / glutton)
version: '2.3'

networks:
  cyberchef_local:
  spiderfoot_local:

services:

##################
#### Tools
##################

# Cyberchef service
  cyberchef:
    container_name: cyberchef
    restart: always
    networks:
      - cyberchef_local
    ports:
      - "127.0.0.1:64299:8000"
    image: "dtagdevsec/cyberchef:2006"
    read_only: true

#### ELK
## Elasticsearch service
  elasticsearch:
    container_name: elasticsearch
    restart: always
    environment:
      - bootstrap.memory_lock=true
      # - ES_JAVA_OPTS=-Xms2048m -Xmx2048m
      - ES_TMPDIR=/tmp
    cap_add:
      - IPC_LOCK
    ulimits:
      memlock:
        soft: -1
        hard: -1
      nofile:
        soft: 65536
        hard: 65536
    # mem_limit: 4g
    ports:
      - "127.0.0.1:64298:9200"
    image: "dtagdevsec/elasticsearch:2006"
    volumes:
      - /data:/data

## Kibana service
  kibana:
    container_name: kibana
    restart: always
    depends_on:
      elasticsearch:
        condition: service_healthy
    ports:
      - "127.0.0.1:64296:5601"
    image: "dtagdevsec/kibana:2006"

## Logstash service
  logstash:
    container_name: logstash
    restart: always
    # environment:
    #  - LS_JAVA_OPTS=-Xms2048m -Xmx2048m
    depends_on:
      elasticsearch:
        condition: service_healthy
    env_file:
      - /opt/tpot/etc/compose/elk_environment
    image: "dtagdevsec/logstash:2006"
    volumes:
      - /data:/data

## Elasticsearch-head service
  head:
    container_name: head
    restart: always
    depends_on:
      elasticsearch:
        condition: service_healthy
    ports:
      - "127.0.0.1:64302:9100"
    image: "dtagdevsec/head:2006"
    read_only: true

# Nginx service
  nginx:
    container_name: nginx
    restart: always
    environment:
      ### If set to YES all changes within Heimdall will remain for the next start
      ### Make sure to uncomment the corresponding volume statements below, or the setting will prevent a successful start of T-Pot.
      - HEIMDALL_PERSIST=NO
    tmpfs:
      - /var/tmp/nginx/client_body
      - /var/tmp/nginx/proxy
      - /var/tmp/nginx/fastcgi
      - /var/tmp/nginx/uwsgi
      - /var/tmp/nginx/scgi
      - /run
      - /var/log/php7/
      - /var/lib/nginx/tmp:uid=100,gid=82
      - /var/lib/nginx/html/storage/logs:uid=100,gid=82
      - /var/lib/nginx/html/storage/framework/views:uid=100,gid=82
    network_mode: "host"
    ports:
      - "64297:64297"
      - "127.0.0.1:64304:64304"
    image: "dtagdevsec/nginx:2006"
    read_only: true
    volumes:
      - /data/nginx/cert/:/etc/nginx/cert/:ro
      - /data/nginx/conf/nginxpasswd:/etc/nginx/nginxpasswd:ro
      - /data/nginx/log/:/var/log/nginx/
      ### Enable the following volumes if you set HEIMDALL_PERSIST=YES
      # - /data/nginx/heimdall/database:/var/lib/nginx/html/database
      # - /data/nginx/heimdall/storage:/var/lib/nginx/html/storage

# Spiderfoot service
  spiderfoot:
    container_name: spiderfoot
    restart: always
    networks:
      - spiderfoot_local
    ports:
      - "127.0.0.1:64303:8080"
    image: "dtagdevsec/spiderfoot:2006"
    volumes:
      - /data/spiderfoot/spiderfoot.db:/home/spiderfoot/spiderfoot.db
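A Hive is thus the ELK and tooling half of T-Pot without any honeypots. Assuming it follows the usual T-Pot compose conventions, it can be brought up manually for testing (filename and working directory assumed):

docker-compose -f hive.yml up -d
docker-compose -f hive.yml ps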

docker/elk/logstash/pot.yml (new file)

@@ -0,0 +1,511 @@
# T-Pot (Pot)
# Do not erase ports sections, these are used by /opt/tpot/bin/rules.sh to setup iptables ACCEPT rules for NFQ (honeytrap / glutton)
version: '2.3'

networks:
  adbhoney_local:
  citrixhoneypot_local:
  conpot_local_IEC104:
  conpot_local_guardian_ast:
  conpot_local_ipmi:
  conpot_local_kamstrup_382:
  cowrie_local:
  dicompot_local:
  dionaea_local:
  elasticpot_local:
  heralding_local:
  honeysap_local:
  logstash_local:
  mailoney_local:
  medpot_local:
  rdpy_local:
  tanner_local:
  ewsposter_local:

services:

##################
#### Honeypots
##################

# Adbhoney service
  adbhoney:
    container_name: adbhoney
    restart: always
    networks:
      - adbhoney_local
    ports:
      - "5555:5555"
    image: "dtagdevsec/adbhoney:2006"
    read_only: true
    volumes:
      - /data/adbhoney/log:/opt/adbhoney/log
      - /data/adbhoney/downloads:/opt/adbhoney/dl

# Ciscoasa service
  ciscoasa:
    container_name: ciscoasa
    restart: always
    tmpfs:
      - /tmp/ciscoasa:uid=2000,gid=2000
    network_mode: "host"
    ports:
      - "5000:5000/udp"
      - "8443:8443"
    image: "dtagdevsec/ciscoasa:2006"
    read_only: true
    volumes:
      - /data/ciscoasa/log:/var/log/ciscoasa

# CitrixHoneypot service
  citrixhoneypot:
    container_name: citrixhoneypot
    restart: always
    networks:
      - citrixhoneypot_local
    ports:
      - "443:443"
    image: "dtagdevsec/citrixhoneypot:2006"
    read_only: true
    volumes:
      - /data/citrixhoneypot/logs:/opt/citrixhoneypot/logs

# Conpot IEC104 service
  conpot_IEC104:
    container_name: conpot_iec104
    restart: always
    environment:
      - CONPOT_CONFIG=/etc/conpot/conpot.cfg
      - CONPOT_JSON_LOG=/var/log/conpot/conpot_IEC104.json
      - CONPOT_LOG=/var/log/conpot/conpot_IEC104.log
      - CONPOT_TEMPLATE=IEC104
      - CONPOT_TMP=/tmp/conpot
    tmpfs:
      - /tmp/conpot:uid=2000,gid=2000
    networks:
      - conpot_local_IEC104
    ports:
      - "161:161/udp"
      - "2404:2404"
    image: "dtagdevsec/conpot:2006"
    read_only: true
    volumes:
      - /data/conpot/log:/var/log/conpot

# Conpot guardian_ast service
  conpot_guardian_ast:
    container_name: conpot_guardian_ast
    restart: always
    environment:
      - CONPOT_CONFIG=/etc/conpot/conpot.cfg
      - CONPOT_JSON_LOG=/var/log/conpot/conpot_guardian_ast.json
      - CONPOT_LOG=/var/log/conpot/conpot_guardian_ast.log
      - CONPOT_TEMPLATE=guardian_ast
      - CONPOT_TMP=/tmp/conpot
    tmpfs:
      - /tmp/conpot:uid=2000,gid=2000
    networks:
      - conpot_local_guardian_ast
    ports:
      - "10001:10001"
    image: "dtagdevsec/conpot:2006"
    read_only: true
    volumes:
      - /data/conpot/log:/var/log/conpot

# Conpot ipmi
  conpot_ipmi:
    container_name: conpot_ipmi
    restart: always
    environment:
      - CONPOT_CONFIG=/etc/conpot/conpot.cfg
      - CONPOT_JSON_LOG=/var/log/conpot/conpot_ipmi.json
      - CONPOT_LOG=/var/log/conpot/conpot_ipmi.log
      - CONPOT_TEMPLATE=ipmi
      - CONPOT_TMP=/tmp/conpot
    tmpfs:
      - /tmp/conpot:uid=2000,gid=2000
    networks:
      - conpot_local_ipmi
    ports:
      - "623:623/udp"
    image: "dtagdevsec/conpot:2006"
    read_only: true
    volumes:
      - /data/conpot/log:/var/log/conpot

# Conpot kamstrup_382
  conpot_kamstrup_382:
    container_name: conpot_kamstrup_382
    restart: always
    environment:
      - CONPOT_CONFIG=/etc/conpot/conpot.cfg
      - CONPOT_JSON_LOG=/var/log/conpot/conpot_kamstrup_382.json
      - CONPOT_LOG=/var/log/conpot/conpot_kamstrup_382.log
      - CONPOT_TEMPLATE=kamstrup_382
      - CONPOT_TMP=/tmp/conpot
    tmpfs:
      - /tmp/conpot:uid=2000,gid=2000
    networks:
      - conpot_local_kamstrup_382
    ports:
      - "1025:1025"
      - "50100:50100"
    image: "dtagdevsec/conpot:2006"
    read_only: true
    volumes:
      - /data/conpot/log:/var/log/conpot

# Cowrie service
  cowrie:
    container_name: cowrie
    restart: always
    tmpfs:
      - /tmp/cowrie:uid=2000,gid=2000
      - /tmp/cowrie/data:uid=2000,gid=2000
    networks:
      - cowrie_local
    ports:
      - "22:22"
      - "23:23"
    image: "dtagdevsec/cowrie:2006"
    read_only: true
    volumes:
      - /data/cowrie/downloads:/home/cowrie/cowrie/dl
      - /data/cowrie/keys:/home/cowrie/cowrie/etc
      - /data/cowrie/log:/home/cowrie/cowrie/log
      - /data/cowrie/log/tty:/home/cowrie/cowrie/log/tty

# Dicompot service
# Get the Horos Client for testing: https://horosproject.org/
# Get Dicom images (CC BY 3.0): https://www.cancerimagingarchive.net/collections/
# Put images (which must be in Dicom DCM format or it will not work!) into /data/dicompot/images
  dicompot:
    container_name: dicompot
    restart: always
    networks:
      - dicompot_local
    ports:
      - "11112:11112"
    image: "dtagdevsec/dicompot:2006"
    read_only: true
    volumes:
      - /data/dicompot/log:/var/log/dicompot
      # - /data/dicompot/images:/opt/dicompot/images

# Dionaea service
  dionaea:
    container_name: dionaea
    stdin_open: true
    tty: true
    restart: always
    networks:
      - dionaea_local
    ports:
      - "20:20"
      - "21:21"
      - "42:42"
      - "69:69/udp"
      - "81:81"
      - "135:135"
      # - "443:443"
      - "445:445"
      - "1433:1433"
      - "1723:1723"
      - "1883:1883"
      - "3306:3306"
      - "5060:5060"
      - "5060:5060/udp"
      - "5061:5061"
      - "27017:27017"
    image: "dtagdevsec/dionaea:2006"
    read_only: true
    volumes:
      - /data/dionaea/roots/ftp:/opt/dionaea/var/dionaea/roots/ftp
      - /data/dionaea/roots/tftp:/opt/dionaea/var/dionaea/roots/tftp
      - /data/dionaea/roots/www:/opt/dionaea/var/dionaea/roots/www
      - /data/dionaea/roots/upnp:/opt/dionaea/var/dionaea/roots/upnp
      - /data/dionaea:/opt/dionaea/var/dionaea
      - /data/dionaea/binaries:/opt/dionaea/var/dionaea/binaries
      - /data/dionaea/log:/opt/dionaea/var/log
      - /data/dionaea/rtp:/opt/dionaea/var/dionaea/rtp

# ElasticPot service
  elasticpot:
    container_name: elasticpot
    restart: always
    networks:
      - elasticpot_local
    ports:
      - "9200:9200"
    image: "dtagdevsec/elasticpot:2006"
    read_only: true
    volumes:
      - /data/elasticpot/log:/opt/elasticpot/log

# Heralding service
  heralding:
    container_name: heralding
    restart: always
    tmpfs:
      - /tmp/heralding:uid=2000,gid=2000
    networks:
      - heralding_local
    ports:
      # - "21:21"
      # - "22:22"
      # - "23:23"
      # - "25:25"
      # - "80:80"
      - "110:110"
      - "143:143"
      # - "443:443"
      - "465:465"
      - "993:993"
      - "995:995"
      # - "3306:3306"
      # - "3389:3389"
      - "1080:1080"
      - "5432:5432"
      - "5900:5900"
    image: "dtagdevsec/heralding:2006"
    read_only: true
    volumes:
      - /data/heralding/log:/var/log/heralding

# HoneySAP service
  honeysap:
    container_name: honeysap
    restart: always
    networks:
      - honeysap_local
    ports:
      - "3299:3299"
    image: "dtagdevsec/honeysap:2006"
    volumes:
      - /data/honeysap/log:/opt/honeysap/log

# Honeytrap service
  honeytrap:
    container_name: honeytrap
    restart: always
    tmpfs:
      - /tmp/honeytrap:uid=2000,gid=2000
    network_mode: "host"
    cap_add:
      - NET_ADMIN
    image: "dtagdevsec/honeytrap:2006"
    read_only: true
    volumes:
      - /data/honeytrap/attacks:/opt/honeytrap/var/attacks
      - /data/honeytrap/downloads:/opt/honeytrap/var/downloads
      - /data/honeytrap/log:/opt/honeytrap/var/log

# Mailoney service
  mailoney:
    container_name: mailoney
    restart: always
    environment:
      - HPFEEDS_SERVER=
      - HPFEEDS_IDENT=user
      - HPFEEDS_SECRET=pass
      - HPFEEDS_PORT=20000
      - HPFEEDS_CHANNELPREFIX=prefix
    networks:
      - mailoney_local
    ports:
      - "25:25"
    image: "dtagdevsec/mailoney:2006"
    read_only: true
    volumes:
      - /data/mailoney/log:/opt/mailoney/logs

# Medpot service
  medpot:
    container_name: medpot
    restart: always
    networks:
      - medpot_local
    ports:
      - "2575:2575"
    image: "dtagdevsec/medpot:2006"
    read_only: true
    volumes:
      - /data/medpot/log/:/var/log/medpot

# Rdpy service
  rdpy:
    container_name: rdpy
    extra_hosts:
      - hpfeeds.example.com:127.0.0.1
    restart: always
    environment:
      - HPFEEDS_SERVER=hpfeeds.example.com
      - HPFEEDS_IDENT=user
      - HPFEEDS_SECRET=pass
      - HPFEEDS_PORT=65000
      - SERVERID=id
    networks:
      - rdpy_local
    ports:
      - "3389:3389"
    image: "dtagdevsec/rdpy:2006"
    read_only: true
    volumes:
      - /data/rdpy/log:/var/log/rdpy

#### Snare / Tanner
## Tanner Redis Service
  tanner_redis:
    container_name: tanner_redis
    restart: always
    tty: true
    networks:
      - tanner_local
    image: "dtagdevsec/redis:2006"
    read_only: true

## PHP Sandbox service
  tanner_phpox:
    container_name: tanner_phpox
    restart: always
    tty: true
    networks:
      - tanner_local
    image: "dtagdevsec/phpox:2006"
    read_only: true

## Tanner API Service
  tanner_api:
    container_name: tanner_api
    restart: always
    tmpfs:
      - /tmp/tanner:uid=2000,gid=2000
    tty: true
    networks:
      - tanner_local
    image: "dtagdevsec/tanner:2006"
    read_only: true
    volumes:
      - /data/tanner/log:/var/log/tanner
    command: tannerapi
    depends_on:
      - tanner_redis

## Tanner Service
  tanner:
    container_name: tanner
    restart: always
    tmpfs:
      - /tmp/tanner:uid=2000,gid=2000
    tty: true
    networks:
      - tanner_local
    image: "dtagdevsec/tanner:2006"
    command: tanner
    read_only: true
    volumes:
      - /data/tanner/log:/var/log/tanner
      - /data/tanner/files:/opt/tanner/files
    depends_on:
      - tanner_api
      # - tanner_web
      - tanner_phpox

## Snare Service
  snare:
    container_name: snare
    restart: always
    tty: true
    networks:
      - tanner_local
    ports:
      - "80:80"
    image: "dtagdevsec/snare:2006"
    depends_on:
      - tanner

##################
#### NSM
##################

# Fatt service
  fatt:
    container_name: fatt
    restart: always
    network_mode: "host"
    cap_add:
      - NET_ADMIN
      - SYS_NICE
      - NET_RAW
    image: "dtagdevsec/fatt:2006"
    volumes:
      - /data/fatt/log:/opt/fatt/log

# P0f service
  p0f:
    container_name: p0f
    restart: always
    network_mode: "host"
    image: "dtagdevsec/p0f:2006"
    read_only: true
    volumes:
      - /data/p0f/log:/var/log/p0f

# Suricata service
  suricata:
    container_name: suricata
    restart: always
    environment:
      # For ET Pro ruleset replace "OPEN" with your OINKCODE
      - OINKCODE=OPEN
    network_mode: "host"
    cap_add:
      - NET_ADMIN
      - SYS_NICE
      - NET_RAW
    image: "dtagdevsec/suricata:2006"
    volumes:
      - /data/suricata/log:/var/log/suricata

##################
#### Tools
##################

# Logstash service
  logstash:
    container_name: logstash
    restart: always
    networks:
      - logstash_local
    # environment:
    #  - LS_JAVA_OPTS=-Xms2048m -Xmx2048m
    env_file:
      - /opt/tpot/etc/compose/elk_environment
    image: "dtagdevsec/logstash:2006"
    volumes:
      - /data:/data

# Ewsposter service
  ewsposter:
    container_name: ewsposter
    restart: always
    networks:
      - ewsposter_local
    environment:
      - EWS_HPFEEDS_ENABLE=false
      - EWS_HPFEEDS_HOST=host
      - EWS_HPFEEDS_PORT=port
      - EWS_HPFEEDS_CHANNELS=channels
      - EWS_HPFEEDS_IDENT=user
      - EWS_HPFEEDS_SECRET=secret
      - EWS_HPFEEDS_TLSCERT=false
      - EWS_HPFEEDS_FORMAT=json
    env_file:
      - /opt/tpot/etc/compose/elk_environment
    image: "dtagdevsec/ewsposter:2006"
    volumes:
      - /data:/data
      - /data/ews/conf/ews.ip:/opt/ewsposter/ews.ip
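The pot counterpart keeps all honeypots and NSM tools but, apart from Logstash and Ewsposter, none of the ELK stack. A compose file of this size is easy to break, so a quick syntax check before deployment is worthwhile:

docker-compose -f docker/elk/logstash/pot.yml config -q
# prints nothing if the file parses cleanly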


@@ -2,4 +2,5 @@ not (host sicherheitstacho.eu or community.sicherheitstacho.eu or listbot.sicher
not (host rules.emergingthreats.net or rules.emergingthreatspro.com) and
not (host deb.debian.org) and
not (host ghcr.io) and
-not (host index.docker.io or docker.io)
+not (host index.docker.io or docker.io) and
+not (tcp port 64305)
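The added clause keeps the pot's own tunneled log traffic on tcp/64305 out of the NSM captures, so shipped logs do not show up as attack traffic. The combined expression can be sanity-checked with tcpdump, e.g.:

tcpdump -i any -c 5 -n 'not (tcp port 64305)'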