re-implement distributed feature, without ssh

add sensor compose file
add distributed option to tpot config
housekeeping / cleanup
This commit is contained in:
t3chn0m4g3 2024-01-05 20:19:50 +01:00
parent c634d294c7
commit 1da37b5f85
16 changed files with 726 additions and 235 deletions

compose/sensor.yml Normal file

@@ -0,0 +1,680 @@
# T-Pot: SENSOR
version: '3.9'

networks:
  adbhoney_local:
  ciscoasa_local:
  citrixhoneypot_local:
  conpot_local_IEC104:
  conpot_local_guardian_ast:
  conpot_local_ipmi:
  conpot_local_kamstrup_382:
  cowrie_local:
  ddospot_local:
  dicompot_local:
  dionaea_local:
  elasticpot_local:
  heralding_local:
  ipphoney_local:
  mailoney_local:
  medpot_local:
  redishoneypot_local:
  sentrypeer_local:
  tanner_local:
  ewsposter_local:

services:

#########################################
#### DEV
#########################################
#### T-Pot Init - Never delete this!
#########################################

# T-Pot Init Service
  tpotinit:
    container_name: tpotinit
    env_file:
      - .env
    restart: always
    tmpfs:
      - /tmp/etc:uid=2000,gid=2000
      - /tmp/:uid=2000,gid=2000
    network_mode: "host"
    cap_add:
      - NET_ADMIN
    image: ${TPOT_REPO}/tpotinit:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    volumes:
      - ${TPOT_DOCKER_COMPOSE}:/tmp/tpot/docker-compose.yml:ro
      - ${TPOT_DATA_PATH}/blackhole:/etc/blackhole
      - ${TPOT_DATA_PATH}:/data

##################
#### Honeypots
##################

# Adbhoney service
  adbhoney:
    container_name: adbhoney
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    networks:
      - adbhoney_local
    ports:
      - "5555:5555"
    image: ${TPOT_REPO}/adbhoney:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/adbhoney/log:/opt/adbhoney/log
      - ${TPOT_DATA_PATH}/adbhoney/downloads:/opt/adbhoney/dl

# Ciscoasa service
  ciscoasa:
    container_name: ciscoasa
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    tmpfs:
      - /tmp/ciscoasa:uid=2000,gid=2000
    networks:
      - ciscoasa_local
    ports:
      - "5000:5000/udp"
      - "8443:8443"
    image: ${TPOT_REPO}/ciscoasa:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/ciscoasa/log:/var/log/ciscoasa

# CitrixHoneypot service
  citrixhoneypot:
    container_name: citrixhoneypot
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    networks:
      - citrixhoneypot_local
    ports:
      - "443:443"
    image: ${TPOT_REPO}/citrixhoneypot:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/citrixhoneypot/logs:/opt/citrixhoneypot/logs

# Conpot IEC104 service
  conpot_IEC104:
    container_name: conpot_iec104
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    environment:
      - CONPOT_CONFIG=/etc/conpot/conpot.cfg
      - CONPOT_JSON_LOG=/var/log/conpot/conpot_IEC104.json
      - CONPOT_LOG=/var/log/conpot/conpot_IEC104.log
      - CONPOT_TEMPLATE=IEC104
      - CONPOT_TMP=/tmp/conpot
    tmpfs:
      - /tmp/conpot:uid=2000,gid=2000
    networks:
      - conpot_local_IEC104
    ports:
      - "161:161/udp"
      - "2404:2404"
    image: ${TPOT_REPO}/conpot:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/conpot/log:/var/log/conpot

# Conpot guardian_ast service
  conpot_guardian_ast:
    container_name: conpot_guardian_ast
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    environment:
      - CONPOT_CONFIG=/etc/conpot/conpot.cfg
      - CONPOT_JSON_LOG=/var/log/conpot/conpot_guardian_ast.json
      - CONPOT_LOG=/var/log/conpot/conpot_guardian_ast.log
      - CONPOT_TEMPLATE=guardian_ast
      - CONPOT_TMP=/tmp/conpot
    tmpfs:
      - /tmp/conpot:uid=2000,gid=2000
    networks:
      - conpot_local_guardian_ast
    ports:
      - "10001:10001"
    image: ${TPOT_REPO}/conpot:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/conpot/log:/var/log/conpot

# Conpot ipmi service
  conpot_ipmi:
    container_name: conpot_ipmi
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    environment:
      - CONPOT_CONFIG=/etc/conpot/conpot.cfg
      - CONPOT_JSON_LOG=/var/log/conpot/conpot_ipmi.json
      - CONPOT_LOG=/var/log/conpot/conpot_ipmi.log
      - CONPOT_TEMPLATE=ipmi
      - CONPOT_TMP=/tmp/conpot
    tmpfs:
      - /tmp/conpot:uid=2000,gid=2000
    networks:
      - conpot_local_ipmi
    ports:
      - "623:623/udp"
    image: ${TPOT_REPO}/conpot:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/conpot/log:/var/log/conpot

# Conpot kamstrup_382 service
  conpot_kamstrup_382:
    container_name: conpot_kamstrup_382
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    environment:
      - CONPOT_CONFIG=/etc/conpot/conpot.cfg
      - CONPOT_JSON_LOG=/var/log/conpot/conpot_kamstrup_382.json
      - CONPOT_LOG=/var/log/conpot/conpot_kamstrup_382.log
      - CONPOT_TEMPLATE=kamstrup_382
      - CONPOT_TMP=/tmp/conpot
    tmpfs:
      - /tmp/conpot:uid=2000,gid=2000
    networks:
      - conpot_local_kamstrup_382
    ports:
      - "1025:1025"
      - "50100:50100"
    image: ${TPOT_REPO}/conpot:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/conpot/log:/var/log/conpot

# Cowrie service
  cowrie:
    container_name: cowrie
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    tmpfs:
      - /tmp/cowrie:uid=2000,gid=2000
      - /tmp/cowrie/data:uid=2000,gid=2000
    networks:
      - cowrie_local
    ports:
      - "22:22"
      - "23:23"
    image: ${TPOT_REPO}/cowrie:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/cowrie/downloads:/home/cowrie/cowrie/dl
      - ${TPOT_DATA_PATH}/cowrie/keys:/home/cowrie/cowrie/etc
      - ${TPOT_DATA_PATH}/cowrie/log:/home/cowrie/cowrie/log
      - ${TPOT_DATA_PATH}/cowrie/log/tty:/home/cowrie/cowrie/log/tty

# Ddospot service
  ddospot:
    container_name: ddospot
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    networks:
      - ddospot_local
    ports:
      - "19:19/udp"
      - "53:53/udp"
      - "123:123/udp"
      # - "161:161/udp"
      - "1900:1900/udp"
    image: ${TPOT_REPO}/ddospot:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/ddospot/log:/opt/ddospot/ddospot/logs
      - ${TPOT_DATA_PATH}/ddospot/bl:/opt/ddospot/ddospot/bl
      - ${TPOT_DATA_PATH}/ddospot/db:/opt/ddospot/ddospot/db

# Dicompot service
# Get the Horos Client for testing: https://horosproject.org/
# Get DICOM images (CC BY 3.0): https://www.cancerimagingarchive.net/collections/
# Put images (which must be in DICOM DCM format or they will not work!) into /data/dicompot/images
  dicompot:
    container_name: dicompot
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    networks:
      - dicompot_local
    ports:
      - "11112:11112"
    image: ${TPOT_REPO}/dicompot:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/dicompot/log:/var/log/dicompot
      # - ${TPOT_DATA_PATH}/dicompot/images:/opt/dicompot/images

# Dionaea service
  dionaea:
    container_name: dionaea
    stdin_open: true
    tty: true
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    networks:
      - dionaea_local
    ports:
      - "20:20"
      - "21:21"
      - "42:42"
      - "69:69/udp"
      - "81:81"
      - "135:135"
      # - "443:443"
      - "445:445"
      - "1433:1433"
      - "1723:1723"
      - "1883:1883"
      - "3306:3306"
      # - "5060:5060"
      # - "5060:5060/udp"
      # - "5061:5061"
      - "27017:27017"
    image: ${TPOT_REPO}/dionaea:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/dionaea/roots/ftp:/opt/dionaea/var/dionaea/roots/ftp
      - ${TPOT_DATA_PATH}/dionaea/roots/tftp:/opt/dionaea/var/dionaea/roots/tftp
      - ${TPOT_DATA_PATH}/dionaea/roots/www:/opt/dionaea/var/dionaea/roots/www
      - ${TPOT_DATA_PATH}/dionaea/roots/upnp:/opt/dionaea/var/dionaea/roots/upnp
      - ${TPOT_DATA_PATH}/dionaea:/opt/dionaea/var/dionaea
      - ${TPOT_DATA_PATH}/dionaea/binaries:/opt/dionaea/var/dionaea/binaries
      - ${TPOT_DATA_PATH}/dionaea/log:/opt/dionaea/var/log
      - ${TPOT_DATA_PATH}/dionaea/rtp:/opt/dionaea/var/dionaea/rtp

# ElasticPot service
  elasticpot:
    container_name: elasticpot
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    networks:
      - elasticpot_local
    ports:
      - "9200:9200"
    image: ${TPOT_REPO}/elasticpot:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/elasticpot/log:/opt/elasticpot/log

# Heralding service
  heralding:
    container_name: heralding
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    tmpfs:
      - /tmp/heralding:uid=2000,gid=2000
    networks:
      - heralding_local
    ports:
      # - "21:21"
      # - "22:22"
      # - "23:23"
      # - "25:25"
      # - "80:80"
      - "110:110"
      - "143:143"
      # - "443:443"
      - "465:465"
      - "993:993"
      - "995:995"
      # - "3306:3306"
      # - "3389:3389"
      - "1080:1080"
      - "5432:5432"
      - "5900:5900"
    image: ${TPOT_REPO}/heralding:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/heralding/log:/var/log/heralding

# Honeytrap service
  honeytrap:
    container_name: honeytrap
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    tmpfs:
      - /tmp/honeytrap:uid=2000,gid=2000
    network_mode: "host"
    cap_add:
      - NET_ADMIN
    image: ${TPOT_REPO}/honeytrap:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/honeytrap/attacks:/opt/honeytrap/var/attacks
      - ${TPOT_DATA_PATH}/honeytrap/downloads:/opt/honeytrap/var/downloads
      - ${TPOT_DATA_PATH}/honeytrap/log:/opt/honeytrap/var/log

# Ipphoney service
  ipphoney:
    container_name: ipphoney
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    networks:
      - ipphoney_local
    ports:
      - "631:631"
    image: ${TPOT_REPO}/ipphoney:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/ipphoney/log:/opt/ipphoney/log

# Mailoney service
  mailoney:
    container_name: mailoney
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    environment:
      - HPFEEDS_SERVER=
      - HPFEEDS_IDENT=user
      - HPFEEDS_SECRET=pass
      - HPFEEDS_PORT=20000
      - HPFEEDS_CHANNELPREFIX=prefix
    networks:
      - mailoney_local
    ports:
      - "25:25"
    image: ${TPOT_REPO}/mailoney:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/mailoney/log:/opt/mailoney/logs

# Medpot service
  medpot:
    container_name: medpot
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    networks:
      - medpot_local
    ports:
      - "2575:2575"
    image: ${TPOT_REPO}/medpot:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/medpot/log/:/var/log/medpot

# Redishoneypot service
  redishoneypot:
    container_name: redishoneypot
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    networks:
      - redishoneypot_local
    ports:
      - "6379:6379"
    image: ${TPOT_REPO}/redishoneypot:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/redishoneypot/log:/var/log/redishoneypot

# SentryPeer service
  sentrypeer:
    container_name: sentrypeer
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    # SentryPeer can exchange bad actor data via DHT / P2P mode when the ENV below is set to true (1).
    # In some cases (i.e. internally deployed T-Pots) this might be confusing, as SentryPeer will show
    # the bad actors in its logs. This option is therefore opt-in.
    # environment:
    #   - SENTRYPEER_PEER_TO_PEER=0
    networks:
      - sentrypeer_local
    ports:
      # - "4222:4222/udp"
      - "5060:5060/udp"
      # - "127.0.0.1:8082:8082"
    image: ${TPOT_REPO}/sentrypeer:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/sentrypeer/log:/var/log/sentrypeer

#### Snare / Tanner
## Tanner Redis Service
  tanner_redis:
    container_name: tanner_redis
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    tty: true
    networks:
      - tanner_local
    image: ${TPOT_REPO}/redis:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true

## PHP Sandbox service
  tanner_phpox:
    container_name: tanner_phpox
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    tty: true
    networks:
      - tanner_local
    image: ${TPOT_REPO}/phpox:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true

## Tanner API Service
  tanner_api:
    container_name: tanner_api
    restart: always
    depends_on:
      - tanner_redis
    tmpfs:
      - /tmp/tanner:uid=2000,gid=2000
    tty: true
    networks:
      - tanner_local
    image: ${TPOT_REPO}/tanner:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/tanner/log:/var/log/tanner
    command: tannerapi

## Tanner Service
  tanner:
    container_name: tanner
    restart: always
    depends_on:
      - tanner_api
      - tanner_phpox
    tmpfs:
      - /tmp/tanner:uid=2000,gid=2000
    tty: true
    networks:
      - tanner_local
    image: ${TPOT_REPO}/tanner:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    command: tanner
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/tanner/log:/var/log/tanner
      - ${TPOT_DATA_PATH}/tanner/files:/opt/tanner/files

## Snare Service
  snare:
    container_name: snare
    restart: always
    depends_on:
      - tanner
    tty: true
    networks:
      - tanner_local
    ports:
      - "80:80"
    image: ${TPOT_REPO}/snare:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}

##################
#### NSM
##################

# Fatt service
  fatt:
    container_name: fatt
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    network_mode: "host"
    cap_add:
      - NET_ADMIN
      - SYS_NICE
      - NET_RAW
    image: ${TPOT_REPO}/fatt:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    volumes:
      - ${TPOT_DATA_PATH}/fatt/log:/opt/fatt/log

# P0f service
  p0f:
    container_name: p0f
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    network_mode: "host"
    image: ${TPOT_REPO}/p0f:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/p0f/log:/var/log/p0f

# Suricata service
  suricata:
    container_name: suricata
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    environment:
      # For the ET Pro ruleset replace "OPEN" with your OINKCODE
      - OINKCODE=OPEN
      # Load external rules from URL
      # - FROMURL="https://username:password@yoururl.com|https://username:password@otherurl.com"
    network_mode: "host"
    cap_add:
      - NET_ADMIN
      - SYS_NICE
      - NET_RAW
    image: ${TPOT_REPO}/suricata:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    volumes:
      - ${TPOT_DATA_PATH}/suricata/log:/var/log/suricata

##################
#### Tools
##################
#### ELK
## Logstash service
  logstash:
    container_name: logstash
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    environment:
      - LS_JAVA_OPTS=-Xms1024m -Xmx1024m
    env_file:
      - .env
    mem_limit: 2g
    image: ${TPOT_REPO}/logstash:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    volumes:
      - ${TPOT_DATA_PATH}:/data
#### /ELK

# Ewsposter service
  ewsposter:
    container_name: ewsposter
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    networks:
      - ewsposter_local
    environment:
      - EWS_HPFEEDS_ENABLE=false
      - EWS_HPFEEDS_HOST=host
      - EWS_HPFEEDS_PORT=port
      - EWS_HPFEEDS_CHANNELS=channels
      - EWS_HPFEEDS_IDENT=user
      - EWS_HPFEEDS_SECRET=secret
      - EWS_HPFEEDS_TLSCERT=false
      - EWS_HPFEEDS_FORMAT=json
    image: ${TPOT_REPO}/ewsposter:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    volumes:
      - ${TPOT_DATA_PATH}:/data
      - ${TPOT_DATA_PATH}/ews/conf/ews.ip:/opt/ewsposter/ews.ip

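For deployment, this file replaces the default compose file on the sensor host, as described in the .env changes further down. A minimal usage sketch, assuming a T-Pot checkout in ~/tpotce:

# On the SENSOR host (checkout path is an assumption)
cd ~/tpotce
cp compose/sensor.yml ./docker-compose.yml
docker compose up -d   # TPOT_TYPE, TPOT_HIVE_IP and TPOT_HIVE_USER are read from .env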

@@ -9,12 +9,9 @@ COPY dist/ /root/dist/
 RUN apt-get update -y && \
     apt-get install -y \
     aria2 \
-    autossh \
     bash \
     bzip2 \
-    curl \
-    # openjdk-11-jre \
-    openssh-client && \
+    curl && \
     #
     # Determine arch, get and install packages
     ARCH=$(arch) && \


@ -42,25 +42,27 @@ if [ "$myCHECK" == "0" ];
echo "Cannot reach Listbot, starting Logstash without latest translation maps."
fi
# Distributed T-Pot installation needs a different pipeline config and autossh tunnel.
if [ "$MY_TPOT_TYPE" == "SENSOR" ];
# Distributed T-Pot installation needs a different pipeline config
if [ "$TPOT_TYPE" == "SENSOR" ];
then
echo
echo "Distributed T-Pot setup, sending T-Pot logs to $MY_HIVE_IP."
echo "Distributed T-Pot setup, sending T-Pot logs to $TPOT_HIVE_IP."
echo
echo "T-Pot type: $MY_TPOT_TYPE"
echo "Keyfile used: $MY_SENSOR_PRIVATEKEYFILE"
echo "Hive username: $MY_HIVE_USERNAME"
echo "Hive IP: $MY_HIVE_IP"
echo "T-Pot type: $TPOT_TYPE"
echo "Hive IP: $TPOT_HIVE_IP"
echo
# Ensure correct file permissions for private keyfile or SSH will ask for password
chmod 600 $MY_SENSOR_PRIVATEKEYFILE
cp /usr/share/logstash/config/pipelines_sensor.yml /usr/share/logstash/config/pipelines.yml
autossh -f -M 0 -4 -l $MY_HIVE_USERNAME -i $MY_SENSOR_PRIVATEKEYFILE -p 64295 -N -L64305:127.0.0.1:64305 $MY_HIVE_IP -o "ServerAliveInterval 30" -o "ServerAliveCountMax 3" -o "StrictHostKeyChecking=no" -o "UserKnownHostsFile=/dev/null"
fi
if [ "$MY_TPOT_TYPE" != "SENSOR" ];
if [ "$TPOT_TYPE" != "SENSOR" ];
then
echo
echo "This is a T-Pot STANDARD / HIVE installation."
echo
echo "T-Pot type: $TPOT_TYPE"
echo
# Index Management is happening through ILM, but we need to put T-Pot ILM setting on ES.
myTPOTILM=$(curl -s -XGET "http://elasticsearch:9200/_ilm/policy/tpot" | grep "Lifecycle policy not found: tpot" -c)
if [ "$myTPOTILM" == "1" ];

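The SENSOR branch above is now driven entirely by environment variables instead of SSH key material. An illustrative .env fragment for a sensor (variable names are from this diff; the values are the placeholder examples documented in the .env changes below):

TPOT_TYPE=SENSOR
TPOT_HIVE_IP=192.168.1.1                  # IP or FQDN of the HIVE
TPOT_HIVE_USER=dXNlcm5hbWU6cGFzc3dvcmQ=   # base64 of 'username:password'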

@@ -708,7 +708,10 @@ output {
       id => "${MY_HOSTNAME}"
       codec => "json"
       format => "json_batch"
-      url => "http://127.0.0.1:64305"
+      url => "https://${TPOT_HIVE_IP}:64294"
+      cacert => "/data/hive.crt"
+      headers => {
+        "Authorization" => "Basic ${TPOT_HIVE_USER}"
+      }
     }
   }

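With the autossh tunnel gone, Logstash ships events directly to the HIVE's HTTPS log endpoint on port 64294, authenticated with HTTP Basic auth against the nginx web user. A minimal reachability check from the sensor, mirroring the output settings above (endpoint, cert path and auth header are taken from this diff; the JSON payload is made up):

source .env
curl --cacert /data/hive.crt \
     -H "Authorization: Basic ${TPOT_HIVE_USER}" \
     -H "Content-Type: application/json" \
     -d '[{"probe":"connectivity"}]' \
     "https://${TPOT_HIVE_IP}:64294"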

@@ -1,77 +0,0 @@
#!/bin/bash
# Make sure script is started as non-root.
myWHOAMI=$(whoami)
if [ "$myWHOAMI" = "root" ]
then
echo "Need to run as non-root ..."
echo ""
exit
fi
# set vars, check deps
myPAM_COCKPIT_FILE="/etc/pam.d/cockpit"
if ! [ -s "$myPAM_COCKPIT_FILE" ];
then
echo "### Cockpit PAM module config does not exist. Something went wrong."
echo ""
exit 1
fi
myPAM_COCKPIT_GA="
# google authenticator for two-factor
auth required pam_google_authenticator.so
"
myAUTHENTICATOR=$(which google-authenticator)
if [ "$myAUTHENTICATOR" == "" ];
then
echo "### Could not locate google-authenticator, trying to install (if asked provide root password)."
echo ""
sudo apt-get update
sudo apt-get install -y libpam-google-authenticator
exec "$1" "$2"
exit 1
fi
# write PAM changes
function fuWRITE_PAM_CHANGES {
myCHECK=$(cat $myPAM_COCKPIT_FILE | grep -c "google")
if ! [ "$myCHECK" == "0" ];
then
echo "### PAM config already enabled. Skipped."
echo ""
else
echo "### Updating PAM config for Cockpit (if asked provide root password)."
echo "$myPAM_COCKPIT_GA" | sudo tee -a $myPAM_COCKPIT_FILE
sudo systemctl restart cockpit
fi
}
# create 2fa
function fuGEN_TOKEN {
echo "### Now generating token for Google Authenticator."
echo ""
google-authenticator -t -d -r 3 -R 30 -w 17
}
# main
echo "### This script will enable Two Factor Authentication for Cockpit."
echo ""
echo "### Please download one of the many authenticator apps from the appstore of your choice."
echo ""
while true;
do
read -p "### Ready to start (y/n)? " myANSWER
case $myANSWER in
[Yy]* ) echo "### OK. Starting ..."; break;;
[Nn]* ) echo "### Exiting."; exit;;
esac
done
fuWRITE_PAM_CHANGES
fuGEN_TOKEN
echo "Done. Re-run this script by every user who needs Cockpit access."
echo ""


@@ -1,73 +0,0 @@
#!/bin/bash
# Run as root only.
myWHOAMI=$(whoami)
if [ "$myWHOAMI" != "root" ]
then
echo "Need to run as root ..."
exit
fi
myPARAM="$1"
if [[ $myPARAM =~ ^([1-9]|[1-9][0-9]|[1-9][0-9][0-9])$ ]];
then
watch --color -n $myPARAM "$0"
exit
fi
# Show current status of T-Pot containers
myCONTAINERS="$(cat /opt/tpot/etc/tpot.yml | grep -v '#' | grep container_name | cut -d: -f2 | sort | tr -d " ")"
myRED=""
myGREEN=""
myBLUE=""
myWHITE=""
myMAGENTA=""
# Blackhole Status
myBLACKHOLE_STATUS=$(ip r | grep "blackhole" -c)
if [ "$myBLACKHOLE_STATUS" -gt "500" ];
then
myBLACKHOLE_STATUS="${myGREEN}ENABLED"
else
myBLACKHOLE_STATUS="${myRED}DISABLED"
fi
function fuGETTPOT_STATUS {
# T-Pot Status
myTPOT_STATUS=$(systemctl status tpot | grep "Active" | awk '{ print $2 }')
if [ "$myTPOT_STATUS" == "active" ];
then
echo "${myGREEN}ACTIVE"
else
echo "${myRED}INACTIVE"
fi
}
function fuGETSTATUS {
grc --colour=on docker ps -f status=running -f status=exited --format "table {{.Names}}\t{{.Status}}\t{{.Ports}}" | grep -v "NAME" | sort
}
function fuGETSYS {
printf "[ ========| System |======== ]\n"
printf "${myBLUE}%+11s ${myWHITE}%-20s\n" "DATE: " "$(date)"
printf "${myBLUE}%+11s ${myWHITE}%-20s\n" "UPTIME: " "$(grc --colour=on uptime)"
printf "${myMAGENTA}%+11s %-20s\n" "T-POT: " "$(fuGETTPOT_STATUS)"
printf "${myMAGENTA}%+11s %-20s\n" "BLACKHOLE: " "$myBLACKHOLE_STATUS${myWHITE}"
echo
}
myDPS=$(fuGETSTATUS)
myDPSNAMES=$(echo "$myDPS" | awk '{ print $1 }' | sort)
fuGETSYS
printf "%-21s %-28s %s\n" "NAME" "STATUS" "PORTS"
if [ "$myDPS" != "" ];
then
echo "$myDPS"
fi
for i in $myCONTAINERS; do
myAVAIL=$(echo "$myDPSNAMES" | grep -o "$i" | uniq | wc -l)
if [ "$myAVAIL" = "0" ];
then
printf "%-28s %-28s\n" "$myRED$i" "DOWN$myWHITE"
fi
done


@@ -1,45 +0,0 @@
#!/bin/bash
# Got root?
myWHOAMI=$(whoami)
if [ "$myWHOAMI" != "root" ]
then
echo "Need to run as root ..."
exit
fi
# Only run with command switch
if [ "$1" != "-y" ]; then
echo "### Setting up docker for Multi Arch Builds."
echo "### Use on x64 only!"
echo "### Run with -y to install!"
echo
exit
fi
# Main
mkdir -p /root/.docker/cli-plugins/
cd /root/.docker/cli-plugins/
wget https://github.com/docker/buildx/releases/download/v0.10.0/buildx-v0.10.0.linux-amd64 -O docker-buildx
chmod +x docker-buildx
docker buildx ls
# We need to create a new builder as the default one cannot handle multi-arch builds
# https://docs.docker.com/desktop/multi-arch/
docker buildx create --name mybuilder
# Set as default
docker buildx use mybuilder
# We need to install emulators, arm64 should be fine for now
# https://github.com/tonistiigi/binfmt/
docker run --privileged --rm tonistiigi/binfmt --install arm64
# Check if everything is setup correctly
docker buildx inspect --bootstrap
echo
echo "### Done."
echo
echo "Example: docker buildx build --platform linux/amd64,linux/arm64 -t username/demo:latest --push ."
echo "Docs: https://docs.docker.com/desktop/multi-arch/"


@@ -1,7 +1,6 @@
 #!/bin/bash
 # Let's add the first local ip to the /tmp/etc/issue and external ip to ews.ip file
 # If the external IP cannot be detected, the internal IP will be inherited.
-#myCHECKIFSENSOR=$(head -n 1 /opt/tpot/etc/tpot.yml | grep "Sensor" | wc -l)
 myUUID=$(cat /data/uuid)
 myLOCALIP=$(ip address show | awk '/inet .*brd/{split($2,a,"/"); print a[1]; exit}')
 myEXTIP=$(/opt/tpot/bin/myip.sh)
@@ -10,14 +9,6 @@ if [ "$myEXTIP" = "" ];
     myEXTIP=$myLOCALIP
 fi
 # Load Blackhole routes if enabled
-#myBLACKHOLE_FILE1="/etc/blackhole/mass_scanner.txt"
-#myBLACKHOLE_FILE2="/etc/blackhole/mass_scanner_cidr.txt"
-#if [ -f "$myBLACKHOLE_FILE1" ] || [ -f "$myBLACKHOLE_FILE2" ];
-#  then
-#    /opt/tpot/bin/blackhole.sh add
-#fi
 myBLACKHOLE_STATUS=$(ip r | grep "blackhole" -c)
 if [ "$myBLACKHOLE_STATUS" -gt "500" ];
   then
@@ -54,16 +45,5 @@ MY_INTIP=$myLOCALIP
 MY_HOSTNAME=$HOSTNAME
 EOF
-#if [ -s "/data/elk/logstash/ls_environment" ];
-#  then
-#    source /data/elk/logstash/ls_environment
-#    tee -a /data/tpot/etc/compose/elk_environment << EOF
-#MY_TPOT_TYPE=$MY_TPOT_TYPE
-#MY_SENSOR_PRIVATEKEYFILE=$MY_SENSOR_PRIVATEKEYFILE
-#MY_HIVE_USERNAME=$MY_HIVE_USERNAME
-#MY_HIVE_IP=$MY_HIVE_IP
-#EOF
-#fi
 chown tpot:tpot /data/ews/conf/ews.ip
 chmod 770 /data/ews/conf/ews.ip


@@ -21,6 +21,30 @@ TPOT_BLACKHOLE=DISABLED
 # if you just do not need any of the logfiles.
 TPOT_PERSISTENCE=on
 # T-Pot Type
+# HIVE: This is the default and offers everything to connect T-Pot sensors.
+# SENSOR: This needs to be used when running a sensor. Be sure to adjust all other
+#         settings as well.
+#         1. You will need to copy compose/sensor.yml to ./docker-compose.yml
+#         2. From the HIVE host you will need to copy ~/data/nginx/cert/nginx.crt to
+#            your SENSOR host to ~/data/hive.crt
+#         3. On HIVE: Create a web user per SENSOR and provide the credentials below.
+#            Create credentials with 'htpasswd data/nginx/conf/lswebpasswd <username>'
+#         4. On SENSOR: Provide username / password from (3) for TPOT_HIVE_USER as base64 encoded string:
+#            "echo -n 'username:password' | base64"
+TPOT_TYPE=HIVE
+# T-Pot Hive User (only relevant for SENSOR deployment)
+# <empty>: This is empty by default.
+# <base64 encoded string>: Provide a base64 encoded string "echo -n 'username:password' | base64"
+#                          i.e. TPOT_HIVE_USER='dXNlcm5hbWU6cGFzc3dvcmQ='
+TPOT_HIVE_USER=
+# T-Pot Hive IP (only relevant for SENSOR deployment)
+# <empty>: This is empty by default.
+# <IP, FQDN>: This can be either an IP (i.e. 192.168.1.1) or an FQDN (i.e. foo.bar.local)
+TPOT_HIVE_IP=
 # T-Pot AttackMap Text Output
 # ENABLED: This is the default and the docker container map_data will print events to the console.
 # DISABLED: Printing events to the console is disabled.

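Steps 1-4 from the comments above condense into the following sketch; the user name sensor01 and the example password are placeholders:

# On the HIVE: create one web user per sensor (step 3)
htpasswd data/nginx/conf/lswebpasswd sensor01

# Copy the HIVE's ~/data/nginx/cert/nginx.crt to ~/data/hive.crt on the
# SENSOR (step 2); any file transfer method will do.

# On the SENSOR: select the sensor stack and encode the credentials (steps 1 and 4)
cp compose/sensor.yml ./docker-compose.yml
echo -n 'sensor01:password' | base64   # paste the output into TPOT_HIVE_USER in .env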

@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/usr/bin/env bash
 # Got root?
 myWHOAMI=$(whoami)