where possible, kibana visualizations are converted to lens objects (more than 100 objects)
all dashboards have been updated
fixes #1392 (SentryPeer log tag left out)
add wordpot dashboard
after discussion (#1486) and testing, iptables-legacy is no longer required
include all kibana objects for installation (import sketch below)
cleaning up some service scripts
t3chn0m4g3 2024-03-18 16:19:49 +01:00
parent 3546e31a7c
commit 234fb16394
15 changed files with 564 additions and 41 deletions
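
As a reference for the "include all kibana objects" item above, a minimal sketch of importing such an NDJSON export by hand through Kibana's saved objects API; the port matches the kibana service in compose/mini.yml below, while the filename and the overwrite flag are illustrative assumptions rather than the installer's actual import step:

# hand-import dashboards/Lens objects into Kibana (sketch; kibana_objects.ndjson is a placeholder name)
curl -s -X POST "http://127.0.0.1:64296/api/saved_objects/_import?overwrite=true" \
  -H "kbn-xsrf: true" \
  --form file=@kibana_objects.ndjson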

compose/mini.yml (new file, 535 additions)

@@ -0,0 +1,535 @@
# T-Pot: MINI
version: '3.9'
networks:
adbhoney_local:
ciscoasa_local:
conpot_local_IEC104:
conpot_local_guardian_ast:
conpot_local_ipmi:
conpot_local_kamstrup_382:
dicompot_local:
honeypots_local:
medpot_local:
spiderfoot_local:
ewsposter_local:
services:
#########################################
#### DEV
#########################################
#### T-Pot Init - Never delete this!
#########################################
# T-Pot Init Service
tpotinit:
container_name: tpotinit
env_file:
- .env
restart: always
stop_grace_period: 60s
tmpfs:
- /tmp/etc:uid=2000,gid=2000
- /tmp/:uid=2000,gid=2000
network_mode: "host"
cap_add:
- NET_ADMIN
image: ${TPOT_REPO}/tpotinit:${TPOT_VERSION}
pull_policy: ${TPOT_PULL_POLICY}
volumes:
- ${TPOT_DOCKER_COMPOSE}:/tmp/tpot/docker-compose.yml:ro
- ${TPOT_DATA_PATH}/blackhole:/etc/blackhole
- ${TPOT_DATA_PATH}:/data
- /var/run/docker.sock:/var/run/docker.sock:ro
##################
#### Honeypots
##################
# Adbhoney service
adbhoney:
container_name: adbhoney
restart: always
depends_on:
tpotinit:
condition: service_healthy
networks:
- adbhoney_local
ports:
- "5555:5555"
image: ${TPOT_REPO}/adbhoney:${TPOT_VERSION}
pull_policy: ${TPOT_PULL_POLICY}
read_only: true
volumes:
- ${TPOT_DATA_PATH}/adbhoney/log:/opt/adbhoney/log
- ${TPOT_DATA_PATH}/adbhoney/downloads:/opt/adbhoney/dl
# Ciscoasa service
ciscoasa:
container_name: ciscoasa
restart: always
depends_on:
tpotinit:
condition: service_healthy
tmpfs:
- /tmp/ciscoasa:uid=2000,gid=2000
networks:
- ciscoasa_local
ports:
- "5000:5000/udp"
- "8443:8443"
image: ${TPOT_REPO}/ciscoasa:${TPOT_VERSION}
pull_policy: ${TPOT_PULL_POLICY}
read_only: true
volumes:
- ${TPOT_DATA_PATH}/ciscoasa/log:/var/log/ciscoasa
# Conpot IEC104 service
conpot_IEC104:
container_name: conpot_iec104
restart: always
depends_on:
tpotinit:
condition: service_healthy
environment:
- CONPOT_CONFIG=/etc/conpot/conpot.cfg
- CONPOT_JSON_LOG=/var/log/conpot/conpot_IEC104.json
- CONPOT_LOG=/var/log/conpot/conpot_IEC104.log
- CONPOT_TEMPLATE=IEC104
- CONPOT_TMP=/tmp/conpot
tmpfs:
- /tmp/conpot:uid=2000,gid=2000
networks:
- conpot_local_IEC104
ports:
- "161:161/udp"
- "2404:2404"
image: ${TPOT_REPO}/conpot:${TPOT_VERSION}
pull_policy: ${TPOT_PULL_POLICY}
read_only: true
volumes:
- ${TPOT_DATA_PATH}/conpot/log:/var/log/conpot
# Conpot guardian_ast service
conpot_guardian_ast:
container_name: conpot_guardian_ast
restart: always
depends_on:
tpotinit:
condition: service_healthy
environment:
- CONPOT_CONFIG=/etc/conpot/conpot.cfg
- CONPOT_JSON_LOG=/var/log/conpot/conpot_guardian_ast.json
- CONPOT_LOG=/var/log/conpot/conpot_guardian_ast.log
- CONPOT_TEMPLATE=guardian_ast
- CONPOT_TMP=/tmp/conpot
tmpfs:
- /tmp/conpot:uid=2000,gid=2000
networks:
- conpot_local_guardian_ast
ports:
- "10001:10001"
image: ${TPOT_REPO}/conpot:${TPOT_VERSION}
pull_policy: ${TPOT_PULL_POLICY}
read_only: true
volumes:
- ${TPOT_DATA_PATH}/conpot/log:/var/log/conpot
# Conpot ipmi
conpot_ipmi:
container_name: conpot_ipmi
restart: always
depends_on:
tpotinit:
condition: service_healthy
environment:
- CONPOT_CONFIG=/etc/conpot/conpot.cfg
- CONPOT_JSON_LOG=/var/log/conpot/conpot_ipmi.json
- CONPOT_LOG=/var/log/conpot/conpot_ipmi.log
- CONPOT_TEMPLATE=ipmi
- CONPOT_TMP=/tmp/conpot
tmpfs:
- /tmp/conpot:uid=2000,gid=2000
networks:
- conpot_local_ipmi
ports:
- "623:623/udp"
image: ${TPOT_REPO}/conpot:${TPOT_VERSION}
pull_policy: ${TPOT_PULL_POLICY}
read_only: true
volumes:
- ${TPOT_DATA_PATH}/conpot/log:/var/log/conpot
# Conpot kamstrup_382
conpot_kamstrup_382:
container_name: conpot_kamstrup_382
restart: always
depends_on:
tpotinit:
condition: service_healthy
environment:
- CONPOT_CONFIG=/etc/conpot/conpot.cfg
- CONPOT_JSON_LOG=/var/log/conpot/conpot_kamstrup_382.json
- CONPOT_LOG=/var/log/conpot/conpot_kamstrup_382.log
- CONPOT_TEMPLATE=kamstrup_382
- CONPOT_TMP=/tmp/conpot
tmpfs:
- /tmp/conpot:uid=2000,gid=2000
networks:
- conpot_local_kamstrup_382
ports:
- "1025:1025"
- "50100:50100"
image: ${TPOT_REPO}/conpot:${TPOT_VERSION}
pull_policy: ${TPOT_PULL_POLICY}
read_only: true
volumes:
- ${TPOT_DATA_PATH}/conpot/log:/var/log/conpot
# Dicompot service
# Get the Horos Client for testing: https://horosproject.org/
# Get Dicom images (CC BY 3.0): https://www.cancerimagingarchive.net/collections/
# Put images (which must be in Dicom DCM format or it will not work!) into /data/dicompot/images
dicompot:
container_name: dicompot
restart: always
depends_on:
tpotinit:
condition: service_healthy
networks:
- dicompot_local
ports:
- "11112:11112"
image: ${TPOT_REPO}/dicompot:${TPOT_VERSION}
pull_policy: ${TPOT_PULL_POLICY}
read_only: true
volumes:
- ${TPOT_DATA_PATH}/dicompot/log:/var/log/dicompot
# - ${TPOT_DATA_PATH}/dicompot/images:/opt/dicompot/images
# Honeypots service
honeypots:
container_name: honeypots
stdin_open: true
tty: true
restart: always
depends_on:
tpotinit:
condition: service_healthy
tmpfs:
- /tmp:uid=2000,gid=2000
networks:
- honeypots_local
ports:
- "21:21"
- "22:22"
- "23:23"
- "25:25"
- "53:53/udp"
- "80:80"
- "110:110"
- "123:123"
- "143:143"
- "161:161"
- "389:389"
- "443:443"
- "445:445"
- "631:631"
- "1080:1080"
- "1433:1433"
- "1521:1521"
- "3306:3306"
- "3389:3389"
- "5060:5060/tcp"
- "5060:5060/udp"
- "5432:5432"
- "5900:5900"
- "6379:6379"
- "6667:6667"
- "8080:8080"
- "9100:9100"
- "9200:9200"
- "11211:11211"
image: ${TPOT_REPO}/honeypots:${TPOT_VERSION}
pull_policy: ${TPOT_PULL_POLICY}
read_only: true
volumes:
- ${TPOT_DATA_PATH}/honeypots/log:/var/log/honeypots
# Honeytrap service
honeytrap:
container_name: honeytrap
restart: always
depends_on:
tpotinit:
condition: service_healthy
tmpfs:
- /tmp/honeytrap:uid=2000,gid=2000
network_mode: "host"
cap_add:
- NET_ADMIN
image: ${TPOT_REPO}/honeytrap:${TPOT_VERSION}
pull_policy: ${TPOT_PULL_POLICY}
read_only: true
volumes:
- ${TPOT_DATA_PATH}/honeytrap/attacks:/opt/honeytrap/var/attacks
- ${TPOT_DATA_PATH}/honeytrap/downloads:/opt/honeytrap/var/downloads
- ${TPOT_DATA_PATH}/honeytrap/log:/opt/honeytrap/var/log
# Medpot service
medpot:
container_name: medpot
restart: always
depends_on:
tpotinit:
condition: service_healthy
networks:
- medpot_local
ports:
- "2575:2575"
image: ${TPOT_REPO}/medpot:${TPOT_VERSION}
pull_policy: ${TPOT_PULL_POLICY}
read_only: true
volumes:
- ${TPOT_DATA_PATH}/medpot/log/:/var/log/medpot
##################
#### NSM
##################
# Fatt service
fatt:
container_name: fatt
restart: always
depends_on:
tpotinit:
condition: service_healthy
network_mode: "host"
cap_add:
- NET_ADMIN
- SYS_NICE
- NET_RAW
image: ${TPOT_REPO}/fatt:${TPOT_VERSION}
pull_policy: ${TPOT_PULL_POLICY}
volumes:
- ${TPOT_DATA_PATH}/fatt/log:/opt/fatt/log
# P0f service
p0f:
container_name: p0f
restart: always
depends_on:
tpotinit:
condition: service_healthy
network_mode: "host"
image: ${TPOT_REPO}/p0f:${TPOT_VERSION}
pull_policy: ${TPOT_PULL_POLICY}
read_only: true
volumes:
- ${TPOT_DATA_PATH}/p0f/log:/var/log/p0f
# Suricata service
suricata:
container_name: suricata
restart: always
depends_on:
tpotinit:
condition: service_healthy
environment:
- OINKCODE=${OINKCODE:-OPEN} # Default to OPEN if unset or NULL (value provided by T-Pot .env)
# Loading external Rules from URL
# - FROMURL="https://username:password@yoururl.com|https://username:password@otherurl.com"
network_mode: "host"
cap_add:
- NET_ADMIN
- SYS_NICE
- NET_RAW
image: ${TPOT_REPO}/suricata:${TPOT_VERSION}
pull_policy: ${TPOT_PULL_POLICY}
volumes:
- ${TPOT_DATA_PATH}/suricata/log:/var/log/suricata
##################
#### Tools
##################
#### ELK
## Elasticsearch service
elasticsearch:
container_name: elasticsearch
restart: always
depends_on:
tpotinit:
condition: service_healthy
environment:
- bootstrap.memory_lock=true
- ES_JAVA_OPTS=-Xms2048m -Xmx2048m
- ES_TMPDIR=/tmp
cap_add:
- IPC_LOCK
ulimits:
memlock:
soft: -1
hard: -1
nofile:
soft: 65536
hard: 65536
mem_limit: 4g
ports:
- "127.0.0.1:64298:9200"
image: ${TPOT_REPO}/elasticsearch:${TPOT_VERSION}
pull_policy: ${TPOT_PULL_POLICY}
volumes:
- ${TPOT_DATA_PATH}:/data
## Kibana service
kibana:
container_name: kibana
restart: always
depends_on:
elasticsearch:
condition: service_healthy
mem_limit: 1g
ports:
- "127.0.0.1:64296:5601"
image: ${TPOT_REPO}/kibana:${TPOT_VERSION}
pull_policy: ${TPOT_PULL_POLICY}
## Logstash service
logstash:
container_name: logstash
restart: always
depends_on:
elasticsearch:
condition: service_healthy
environment:
- LS_JAVA_OPTS=-Xms1024m -Xmx1024m
- TPOT_TYPE=${TPOT_TYPE:-HIVE}
- TPOT_HIVE_USER=${TPOT_HIVE_USER}
- TPOT_HIVE_IP=${TPOT_HIVE_IP}
ports:
- "127.0.0.1:64305:64305"
mem_limit: 2g
image: ${TPOT_REPO}/logstash:${TPOT_VERSION}
pull_policy: ${TPOT_PULL_POLICY}
volumes:
- ${TPOT_DATA_PATH}:/data
## Map Redis Service
map_redis:
container_name: map_redis
restart: always
depends_on:
tpotinit:
condition: service_healthy
stop_signal: SIGKILL
tty: true
image: ${TPOT_REPO}/redis:${TPOT_VERSION}
pull_policy: ${TPOT_PULL_POLICY}
read_only: true
## Map Web Service
map_web:
container_name: map_web
restart: always
depends_on:
tpotinit:
condition: service_healthy
environment:
- MAP_COMMAND=AttackMapServer.py
stop_signal: SIGKILL
tty: true
ports:
- "127.0.0.1:64299:64299"
image: ${TPOT_REPO}/map:${TPOT_VERSION}
pull_policy: ${TPOT_PULL_POLICY}
## Map Data Service
map_data:
container_name: map_data
restart: always
depends_on:
elasticsearch:
condition: service_healthy
environment:
- MAP_COMMAND=DataServer_v2.py
- TPOT_ATTACKMAP_TEXT=${TPOT_ATTACKMAP_TEXT}
- TZ=${TPOT_ATTACKMAP_TEXT_TIMEZONE}
stop_signal: SIGKILL
tty: true
image: ${TPOT_REPO}/map:${TPOT_VERSION}
pull_policy: ${TPOT_PULL_POLICY}
#### /ELK
# Ewsposter service
ewsposter:
container_name: ewsposter
restart: always
depends_on:
tpotinit:
condition: service_healthy
networks:
- ewsposter_local
environment:
- EWS_HPFEEDS_ENABLE=false
- EWS_HPFEEDS_HOST=host
- EWS_HPFEEDS_PORT=port
- EWS_HPFEEDS_CHANNELS=channels
- EWS_HPFEEDS_IDENT=user
- EWS_HPFEEDS_SECRET=secret
- EWS_HPFEEDS_TLSCERT=false
- EWS_HPFEEDS_FORMAT=json
image: ${TPOT_REPO}/ewsposter:${TPOT_VERSION}
pull_policy: ${TPOT_PULL_POLICY}
volumes:
- ${TPOT_DATA_PATH}:/data
- ${TPOT_DATA_PATH}/ews/conf/ews.ip:/opt/ewsposter/ews.ip
# Nginx service
nginx:
container_name: nginx
restart: always
environment:
- TPOT_OSTYPE=${TPOT_OSTYPE}
depends_on:
tpotinit:
condition: service_healthy
tmpfs:
- /var/tmp/nginx/client_body
- /var/tmp/nginx/proxy
- /var/tmp/nginx/fastcgi
- /var/tmp/nginx/uwsgi
- /var/tmp/nginx/scgi
- /run
- /var/lib/nginx/tmp:uid=100,gid=82
network_mode: "host"
ports:
- "64297:64297"
image: ${TPOT_REPO}/nginx:${TPOT_VERSION}
pull_policy: ${TPOT_PULL_POLICY}
read_only: true
volumes:
- ${TPOT_DATA_PATH}/nginx/cert/:/etc/nginx/cert/:ro
- ${TPOT_DATA_PATH}/nginx/conf/nginxpasswd:/etc/nginx/nginxpasswd:ro
- ${TPOT_DATA_PATH}/nginx/conf/lswebpasswd:/etc/nginx/lswebpasswd:ro
- ${TPOT_DATA_PATH}/nginx/log/:/var/log/nginx/
# Spiderfoot service
spiderfoot:
container_name: spiderfoot
restart: always
depends_on:
tpotinit:
condition: service_healthy
networks:
- spiderfoot_local
ports:
- "127.0.0.1:64303:8080"
image: ${TPOT_REPO}/spiderfoot:${TPOT_VERSION}
pull_policy: ${TPOT_PULL_POLICY}
volumes:
- ${TPOT_DATA_PATH}/spiderfoot:/home/spiderfoot/.spiderfoot
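
Regarding the dicompot comments in the file above: DICOM images need to be staged on the host before the commented-out images volume is enabled. A short sketch, assuming TPOT_DATA_PATH resolves to /data and with a purely illustrative source path:

# stage DICOM (.dcm) files for dicompot, then uncomment the images volume in the compose file
mkdir -p /data/dicompot/images
cp /path/to/dicom-set/*.dcm /data/dicompot/images/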


@@ -54,8 +54,8 @@ RUN apk -U --no-cache add \
cd /opt/ && \
git clone https://github.com/qeeqbox/honeypots && \
cd honeypots && \
# git checkout bee3147cf81837ba7639f1e27fe34d717ecccf29 && \
git checkout 5b3bfbecbf85c1f5235b320b333bdeff2d312372 && \
git checkout a990b2c1ab04ffafde229e478ced54ffbb665d5c && \
# git checkout 5b3bfbecbf85c1f5235b320b333bdeff2d312372 && \
# cp /root/dist/pyproject.toml . && \
pip3 install --break-system-packages --upgrade pip && \
pip3 install --break-system-packages . && \


@@ -38,7 +38,8 @@ services:
- "1521:1521"
- "3306:3306"
- "3389:3389"
- "5060:5060"
- "5060:5060/tcp"
- "5060:5060/udp"
- "5432:5432"
- "5900:5900"
- "6379:6379"


@@ -18,7 +18,7 @@ RUN apk --no-cache -U add \
grep \
iproute2 \
iptables \
iptables-legacy \
# iptables-legacy \
jq \
logrotate \
lsblk \


@@ -1,11 +1,4 @@
#!/bin/bash
# Run as root only.
myWHOAMI=$(whoami)
if [ "$myWHOAMI" != "root" ];
then
echo "Need to run as root ..."
exit
fi
if [ "$1" == "" ] || [ "$1" != "all" ] && [ "$1" != "base" ];
then
@@ -22,7 +15,7 @@ myES="http://127.0.0.1:64298/"
myESSTATUS=$(curl -s -XGET ''$myES'_cluster/health' | jq '.' | grep -c green)
if ! [ "$myESSTATUS" = "1" ]
then
echo "### Elasticsearch is not available, try starting via 'systemctl start tpot'."
echo "### Elasticsearch is not available."
exit
else
echo "### Elasticsearch is available, now continuing."
@@ -30,32 +23,26 @@ if ! [ "$myESSTATUS" = "1" ]
fi
# Set vars
myCOUNT=1
myDATE=$(date +%Y%m%d%H%M)
myELKPATH="/data/elk/data"
myKIBANAINDEXNAME=$(curl -s -XGET ''$myES'_cat/indices/.kibana' | awk '{ print $4 }')
myKIBANAINDEXPATH=$myELKPATH/indices/$myKIBANAINDEXNAME
# Let's ensure normal operation on exit or if interrupted ...
function fuCLEANUP {
### Start ELK
systemctl start tpot
echo "### Now starting T-Pot ..."
}
trap fuCLEANUP EXIT
# Stop T-Pot to lift db lock
echo "### Now stopping T-Pot"
systemctl stop tpot
sleep 2
myPATH=$PWD
myELKPATH="data/elk/data"
myKIBANAINDEXNAMES=$(curl -s -XGET ''$myES'_cat/indices/.kibana_*?v&s=index&h=uuid' | tail -n +2)
#echo $myKIBANAINDEXNAMES
for i in $myKIBANAINDEXNAMES;
do
myKIBANAINDEXPATHS="$myKIBANAINDEXPATHS $myELKPATH/indices/$i"
done
# Backup DB in 2 flavors
cd $HOME/tpotce
echo "### Now backing up Elasticsearch folders ..."
if [ "$1" == "all" ];
then
tar cvfz "elkall_"$myDATE".tgz" $myELKPATH
tar cvfz $myPATH"/elkall_"$myDATE".tgz" $myELKPATH
elif [ "$1" == "base" ];
then
tar cvfz "elkbase_"$myDATE".tgz" $myKIBANAINDEXPATH
tar cvfz $myPATH"/elkbase_"$myDATE".tgz" $myKIBANAINDEXPATHS
fi
cd $myPATH
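
A hedged usage sketch for the reworked backup script above; the script name and location are assumptions, and the archive now ends up in the directory the script was called from:

# "all" archives the whole Elasticsearch data folder, "base" only the .kibana_* indices
# reading data/elk may still require elevated permissions even though the root check was dropped
cd ~/tpotce
./backup_es_folders.sh all    # -> elkall_<timestamp>.tgz in the invoking directory
./backup_es_folders.sh base   # -> elkbase_<timestamp>.tgz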


@@ -4,7 +4,7 @@ myES="http://127.0.0.1:64298/"
myESSTATUS=$(curl -s -XGET ''$myES'_cluster/health' | jq '.' | grep -c green)
if ! [ "$myESSTATUS" = "1" ]
then
echo "### Elasticsearch is not available, try starting via 'systemctl start elk'."
echo "### Elasticsearch is not available."
exit 1
else
echo "### Elasticsearch is available, now continuing."


@@ -44,14 +44,14 @@ function fuSETRULES {
### Setting up iptables-legacy rules for honeytrap
if [ "$myNFQCHECK" == "honeytrap" ];
then
iptables-legacy -w -A INPUT -s 127.0.0.1 -j ACCEPT
iptables-legacy -w -A INPUT -d 127.0.0.1 -j ACCEPT
iptables -w -A INPUT -s 127.0.0.1 -j ACCEPT
iptables -w -A INPUT -d 127.0.0.1 -j ACCEPT
for myPORT in $myRULESPORTS; do
iptables-legacy -w -A INPUT -p tcp --dport $myPORT -j ACCEPT
iptables -w -A INPUT -p tcp --dport $myPORT -j ACCEPT
done
iptables-legacy -w -A INPUT -p tcp --syn -m state --state NEW -j NFQUEUE
iptables -w -A INPUT -p tcp --syn -m state --state NEW -j NFQUEUE
fi
### Setting up iptables-legacy rules for glutton
@@ -71,14 +71,14 @@ function fuUNSETRULES {
### Removing iptables-legacy rules for honeytrap
if [ "$myNFQCHECK" == "honeytrap" ];
then
iptables-legacy -w -D INPUT -s 127.0.0.1 -j ACCEPT
iptables-legacy -w -D INPUT -d 127.0.0.1 -j ACCEPT
iptables -w -D INPUT -s 127.0.0.1 -j ACCEPT
iptables -w -D INPUT -d 127.0.0.1 -j ACCEPT
for myPORT in $myRULESPORTS; do
iptables-legacy -w -D INPUT -p tcp --dport $myPORT -j ACCEPT
iptables -w -D INPUT -p tcp --dport $myPORT -j ACCEPT
done
iptables-legacy -w -D INPUT -p tcp --syn -m state --state NEW -j NFQUEUE
iptables -w -D INPUT -p tcp --syn -m state --state NEW -j NFQUEUE
fi
### Removing iptables-legacy rules for glutton
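
Since the rules above now go through plain iptables (see #1486), whichever backend the host's iptables binary is wired to gets used; a quick, hedged way to check the backend and that the honeytrap NFQUEUE rule landed:

# prints e.g. "iptables v1.8.x (nf_tables)" or "... (legacy)", showing the active backend
iptables -V
# list INPUT rules; the honeytrap setup adds ACCEPT rules plus an NFQUEUE catch-all for new TCP connections
iptables -L INPUT -n --line-numbers | grep -E "NFQUEUE|ACCEPT"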

Binary file not shown.