Rework .env / env.example

Add more functions to customizer.py (improve port and service checks, improve user output)
Adjust docker-compose files
Marco Ochse 2024-02-13 19:02:40 +01:00
parent e7aecf560d
commit ef2f5b3f93
11 changed files with 774 additions and 72 deletions

.env

@@ -1,5 +1,9 @@
# T-Pot config file. Do not remove.
###############################################
# T-Pot Base Settings - Adjust to your needs. #
###############################################
# Set the web username and password here; they will be used to create the Nginx password file nginxpasswd.
# Use 'htpasswd -n <username>' to create the WEB_USER if you want to manually deploy T-Pot
# Example: 'htpasswd -n tsec' will print tsec:$apr1$TdJGdsss$6yLsxPmOcXb2kaEZ7lKva0
@@ -21,6 +25,30 @@ TPOT_BLACKHOLE=DISABLED
# if you just do not need any of the logfiles.
TPOT_PERSISTENCE=on
# T-Pot Type
# HIVE: This is the default and offers everything to connect T-Pot sensors.
# SENSOR: This needs to be used when running a sensor. Make sure to adjust all other
#         settings as well.
# 1. You will need to copy compose/sensor.yml to ./docker-compose.yml
# 2. From HIVE host you will need to copy ~/tpotce/data/nginx/cert/nginx.crt to
# your SENSOR host to ~/tpotce/data/hive.crt
# 3. On HIVE: Create a web user per SENSOR on HIVE and provide credentials below
# Create credentials with 'htpasswd ~/tpotce/data/nginx/conf/lswebpasswd <username>'
# 4. On SENSOR: Provide username / password from (3) for TPOT_HIVE_USER as base64 encoded string:
# "echo -n 'username:password' | base64"
TPOT_TYPE=HIVE
# T-Pot Hive User (only relevant for SENSOR deployment)
# <empty>: This is empty by default.
# <base64 encoded string>: Provide a base64 encoded string "echo -n 'username:password' | base64"
# i.e. TPOT_HIVE_USER='dXNlcm5hbWU6cGFzc3dvcmQ='
TPOT_HIVE_USER=
# T-Pot Hive IP (only relevant for SENSOR deployment)
# <empty>: This is empty by default.
# <IP, FQDN>: This can be either an IP (e.g. 192.168.1.1) or an FQDN (e.g. foo.bar.local)
TPOT_HIVE_IP=
# T-Pot AttackMap Text Output
# ENABLED: This is the default and the docker container map_data will print events to the console.
# DISABLED: Printing events to the console is disabled.
@@ -33,6 +61,26 @@ TPOT_ATTACKMAP_TEXT=ENABLED
# Examples: America/New_York, Asia/Taipei, Australia/Melbourne, Europe/Athens, Europe/Berlin
TPOT_ATTACKMAP_TEXT_TIMEZONE=UTC
###################################################################################
# Honeypots / Tools settings
###################################################################################
# Some services / tools offer adjustments via ENVs, which can be set here.
###################################################################################
# SentryPeer P2P mode
# Exchange bad actor data via DHT / P2P mode by setting the ENV to true (1)
# In some cases (e.g. internally deployed T-Pots) this might be confusing, as SentryPeer will show
# the bad actors in its logs. Therefore this option is opt-in.
# 0: This is the default, P2P mode is disabled.
# 1: Enable P2P mode.
SENTRYPEER_PEER_TO_PEER=0
# Suricata ET Pro ruleset
# OPEN: This is the default and will use the ET Open ruleset
# OINKCODE: Replace OPEN with your Oinkcode to use the ET Pro ruleset
OINKCODE=OPEN
###################################################################################
# NEVER MAKE CHANGES TO THIS SECTION UNLESS YOU REALLY KNOW WHAT YOU ARE DOING!!! #
###################################################################################
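A quick way to verify a TPOT_HIVE_USER value is to reproduce the documented "echo -n 'username:password' | base64" step. A minimal Python sketch, using the placeholder credentials from the example above:

import base64

# Encode the HIVE web credentials for TPOT_HIVE_USER.
# 'username:password' is the placeholder pair from the .env comments,
# not a real account.
token = base64.b64encode(b"username:password").decode("ascii")
print(token)  # dXNlcm5hbWU6cGFzc3dvcmQ= (matches the TPOT_HIVE_USER example)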

customizer.py

@@ -2,9 +2,9 @@ from datetime import datetime
import yaml
version = \
"""# T-Pot Service Builder v0.1
"""# T-Pot Service Builder v0.2
This script is intended as a kickstarter for users who want to build a customzized docker-compose.yml for use with T-Pot.
This script is intended as a kickstarter for users who want to build a customized docker-compose.yml for use with T-Pot.
T-Pot Service Builder will ask you for all the docker services you wish to include in your docker-compose configuration file.
The configuration file will be checked for conflicting ports, as some of the honeypots are meant to work on certain ports.
@@ -42,30 +42,33 @@ def prompt_service_include(service_name):
def check_port_conflicts(selected_services):
all_ports = []
for config in selected_services.values():
all_ports = {}
conflict_ports = []
for service_name, config in selected_services.items():
ports = config.get('ports', [])
for port in ports:
# Split the port mapping and take only the host port part
parts = port.split(':')
if len(parts) == 3:
# Format: host_ip:host_port:container_port
host_port = parts[1]
elif len(parts) == 2:
# Format: host_port:container_port (or host_ip:host_port for default container_port)
host_port = parts[0] if parts[1].isdigit() else parts[1]
else:
# Single value, treated as host_port
host_port = parts[0]
host_port = parts[1] if len(parts) == 3 else (parts[0] if len(parts) == 1 or parts[1].isdigit() else parts[1])  # len(parts) == 1 guard avoids an IndexError for bare ports
# Check for port conflict
# Check for port conflict and associate it with the service name
if host_port in all_ports:
print_color(f"Port conflict detected: {host_port}", "red")
conflict_ports.append((service_name, host_port))
if all_ports[host_port] not in [service for service, _ in conflict_ports]:
conflict_ports.append((all_ports[host_port], host_port))
else:
all_ports[host_port] = service_name
if conflict_ports:
print_color("Port conflict(s) detected:", "red")
for service, port in conflict_ports:
print_color(f"{service}: {port}", "red")
return True
all_ports.append(host_port)
return False
def print_color(text, color):
colors = {
"red": "\033[91m",
@@ -79,16 +82,25 @@ def enforce_dependencies(selected_services, services):
# If snare or any tanner services are selected, ensure all are enabled
tanner_services = {'snare', 'tanner', 'tanner_redis', 'tanner_phpox', 'tanner_api'}
if tanner_services.intersection(selected_services):
print_color("For Snare / Tanner to work all required services have been added to your configuration.", "green")
for service in tanner_services:
selected_services[service] = services[service]
# If kibana is enabled, also enable elasticsearch
if 'kibana' in selected_services:
selected_services['elasticsearch'] = services['elasticsearch']
print_color("Kibana requires Elasticsearch which has been added to your configuration.", "green")
# If spiderfoot is enabled, also enable nginx
if 'spiderfoot' in selected_services:
selected_services['nginx'] = services['nginx']
print_color("Spiderfoot requires Nginx which has been added to your configuration.","green")
# If any map services are detected, enable logstash, elasticsearch, nginx, and all map services
map_services = {'map_web', 'map_redis', 'map_data'}
if map_services.intersection(selected_services):
print_color("For Map to work all required services have been added to your configuration.", "green")
for service in map_services.union({'elasticsearch', 'nginx'}):
selected_services[service] = services[service]
@@ -96,9 +108,7 @@ def enforce_dependencies(selected_services, services):
if 'honeytrap' in selected_services and 'glutton' in selected_services:
# Remove glutton and notify
del selected_services['glutton']
print_color(
"Honeytrap and Glutton cannot be active at the same time. Glutton has been removed from your configuration.",
"red")
print_color("Honeytrap and Glutton cannot be active at the same time. Glutton has been removed from your configuration.","red")
def remove_unused_networks(selected_services, services, networks):
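Since check_port_conflicts() now maps each host port to the service that first claimed it, every offender can be reported by name instead of a bare port number. A short usage sketch, assuming the functions above are in scope and using hypothetical service definitions (the real ones are parsed from the services YAML):

# Hypothetical service entries for illustration only.
services = {
    "snare":   {"ports": ["80:80"]},
    "log4pot": {"ports": ["80:8080", "8080:8080"]},
}
# Host port 80 is claimed by both services, so the function prints
#   Port conflict(s) detected:
#   log4pot: 80
#   snare: 80
# and returns True.
if check_port_conflicts(services):
    print_color("Please resolve the conflicts and run the customizer again.", "red")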


@@ -454,11 +454,8 @@ services:
depends_on:
tpotinit:
condition: service_healthy
# SentryPeer offers to exchange bad actor data via DHT / P2P mode by setting the ENV to true (1)
# In some cases (i.e. internally deployed T-Pots) this might be confusing as SentryPeer will show
# the bad actors in its logs. Therefore this option is opt-in based.
# environment:
# - SENTRYPEER_PEER_TO_PEER=0
environment:
- SENTRYPEER_PEER_TO_PEER=${SENTRYPEER_PEER_TO_PEER:-0} # Default to 0 if unset or NULL (value provided by T-Pot .env)
networks:
- sentrypeer_local
ports:
@@ -600,9 +597,8 @@ services:
tpotinit:
condition: service_healthy
environment:
# For ET Pro ruleset replace "OPEN" with your OINKCODE
- OINKCODE=OPEN
# Loading externel Rules from URL
- OINKCODE=${OINKCODE:-OPEN} # Default to OPEN if unset or NULL (value provided by T-Pot .env)
# Loading external Rules from URL
# - FROMURL="https://username:password@yoururl.com|https://username:password@otherurl.com"
network_mode: "host"
cap_add:
@@ -676,6 +672,11 @@ services:
- nginx_local
environment:
- LS_JAVA_OPTS=-Xms1024m -Xmx1024m
- TPOT_TYPE=${TPOT_TYPE:-HIVE}
- TPOT_HIVE_USER=${TPOT_HIVE_USER}
- TPOT_HIVE_IP=${TPOT_HIVE_IP}
ports:
- "127.0.0.1:64305:64305"
mem_limit: 2g
image: ${TPOT_REPO}/logstash:${TPOT_VERSION}
pull_policy: ${TPOT_PULL_POLICY}
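The ${SENTRYPEER_PEER_TO_PEER:-0} and ${OINKCODE:-OPEN} entries rely on Docker Compose variable interpolation: the value after ":-" is substituted when the variable is unset or empty, which is what the inline "unset or NULL" comments refer to. For illustration, the same fallback semantics sketched in Python:

import os

# Compose's "${VAR:-default}" falls back when VAR is unset OR empty;
# os.environ.get() alone only covers the unset case, hence the "or".
peer_to_peer = os.environ.get("SENTRYPEER_PEER_TO_PEER") or "0"
oinkcode = os.environ.get("OINKCODE") or "OPEN"
print(peer_to_peer, oinkcode)  # "0 OPEN" unless .env overrides them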

compose/mobile.yml (new file)

@@ -0,0 +1,608 @@
# T-Pot: MOBILE
# Note: This docker compose file has been adjusted to limit the number of tools, services and honeypots to run
# T-Pot on a Raspberry Pi 4 (8GB of RAM).
# The standard docker compose file should work mostly fine (depending on traffic) if you do not enable a
# desktop environment such as LXDE and meet the minimum requirements of 8GB RAM.
version: '3.9'
networks:
ciscoasa_local:
citrixhoneypot_local:
conpot_local_IEC104:
conpot_local_ipmi:
conpot_local_kamstrup_382:
cowrie_local:
dicompot_local:
dionaea_local:
elasticpot_local:
heralding_local:
ipphoney_local:
log4pot_local:
mailoney_local:
medpot_local:
redishoneypot_local:
sentrypeer_local:
tanner_local:
ewsposter_local:
services:
#########################################
#### DEV
#########################################
#### T-Pot Init - Never delete this!
#########################################
# T-Pot Init Service
tpotinit:
container_name: tpotinit
env_file:
- .env
restart: always
tmpfs:
- /tmp/etc:uid=2000,gid=2000
- /tmp/:uid=2000,gid=2000
network_mode: "host"
cap_add:
- NET_ADMIN
image: ${TPOT_REPO}/tpotinit:${TPOT_VERSION}
pull_policy: ${TPOT_PULL_POLICY}
volumes:
- ${TPOT_DOCKER_COMPOSE}:/tmp/tpot/docker-compose.yml:ro
- ${TPOT_DATA_PATH}/blackhole:/etc/blackhole
- ${TPOT_DATA_PATH}:/data
##################
#### Honeypots
##################
# Ciscoasa service
ciscoasa:
container_name: ciscoasa
restart: always
depends_on:
logstash:
condition: service_healthy
tmpfs:
- /tmp/ciscoasa:uid=2000,gid=2000
networks:
- ciscoasa_local
ports:
- "5000:5000/udp"
- "8443:8443"
image: ${TPOT_REPO}/ciscoasa:${TPOT_VERSION}
pull_policy: ${TPOT_PULL_POLICY}
read_only: true
volumes:
- ${TPOT_DATA_PATH}/ciscoasa/log:/var/log/ciscoasa
# CitrixHoneypot service
citrixhoneypot:
container_name: citrixhoneypot
restart: always
depends_on:
logstash:
condition: service_healthy
networks:
- citrixhoneypot_local
ports:
- "443:443"
image: ${TPOT_REPO}/citrixhoneypot:${TPOT_VERSION}
pull_policy: ${TPOT_PULL_POLICY}
read_only: true
volumes:
- ${TPOT_DATA_PATH}/citrixhoneypot/logs:/opt/citrixhoneypot/logs
# Conpot IEC104 service
conpot_IEC104:
container_name: conpot_iec104
restart: always
depends_on:
logstash:
condition: service_healthy
environment:
- CONPOT_CONFIG=/etc/conpot/conpot.cfg
- CONPOT_JSON_LOG=/var/log/conpot/conpot_IEC104.json
- CONPOT_LOG=/var/log/conpot/conpot_IEC104.log
- CONPOT_TEMPLATE=IEC104
- CONPOT_TMP=/tmp/conpot
tmpfs:
- /tmp/conpot:uid=2000,gid=2000
networks:
- conpot_local_IEC104
ports:
- "161:161/udp"
- "2404:2404"
image: ${TPOT_REPO}/conpot:${TPOT_VERSION}
pull_policy: ${TPOT_PULL_POLICY}
read_only: true
volumes:
- ${TPOT_DATA_PATH}/conpot/log:/var/log/conpot
# Conpot ipmi
conpot_ipmi:
container_name: conpot_ipmi
restart: always
depends_on:
logstash:
condition: service_healthy
environment:
- CONPOT_CONFIG=/etc/conpot/conpot.cfg
- CONPOT_JSON_LOG=/var/log/conpot/conpot_ipmi.json
- CONPOT_LOG=/var/log/conpot/conpot_ipmi.log
- CONPOT_TEMPLATE=ipmi
- CONPOT_TMP=/tmp/conpot
tmpfs:
- /tmp/conpot:uid=2000,gid=2000
networks:
- conpot_local_ipmi
ports:
- "623:623/udp"
image: ${TPOT_REPO}/conpot:${TPOT_VERSION}
pull_policy: ${TPOT_PULL_POLICY}
read_only: true
volumes:
- ${TPOT_DATA_PATH}/conpot/log:/var/log/conpot
# Conpot kamstrup_382
conpot_kamstrup_382:
container_name: conpot_kamstrup_382
restart: always
depends_on:
logstash:
condition: service_healthy
environment:
- CONPOT_CONFIG=/etc/conpot/conpot.cfg
- CONPOT_JSON_LOG=/var/log/conpot/conpot_kamstrup_382.json
- CONPOT_LOG=/var/log/conpot/conpot_kamstrup_382.log
- CONPOT_TEMPLATE=kamstrup_382
- CONPOT_TMP=/tmp/conpot
tmpfs:
- /tmp/conpot:uid=2000,gid=2000
networks:
- conpot_local_kamstrup_382
ports:
- "1025:1025"
- "50100:50100"
image: ${TPOT_REPO}/conpot:${TPOT_VERSION}
pull_policy: ${TPOT_PULL_POLICY}
read_only: true
volumes:
- ${TPOT_DATA_PATH}/conpot/log:/var/log/conpot
# Cowrie service
cowrie:
container_name: cowrie
restart: always
depends_on:
logstash:
condition: service_healthy
tmpfs:
- /tmp/cowrie:uid=2000,gid=2000
- /tmp/cowrie/data:uid=2000,gid=2000
networks:
- cowrie_local
ports:
- "22:22"
- "23:23"
image: ${TPOT_REPO}/cowrie:${TPOT_VERSION}
pull_policy: ${TPOT_PULL_POLICY}
read_only: true
volumes:
- ${TPOT_DATA_PATH}/cowrie/downloads:/home/cowrie/cowrie/dl
- ${TPOT_DATA_PATH}/cowrie/keys:/home/cowrie/cowrie/etc
- ${TPOT_DATA_PATH}/cowrie/log:/home/cowrie/cowrie/log
- ${TPOT_DATA_PATH}/cowrie/log/tty:/home/cowrie/cowrie/log/tty
# Dicompot service
# Get the Horos Client for testing: https://horosproject.org/
# Get Dicom images (CC BY 3.0): https://www.cancerimagingarchive.net/collections/
# Put images (which must be in Dicom DCM format or it will not work!) into /data/dicompot/images
dicompot:
container_name: dicompot
restart: always
depends_on:
logstash:
condition: service_healthy
networks:
- dicompot_local
ports:
- "11112:11112"
image: ${TPOT_REPO}/dicompot:${TPOT_VERSION}
pull_policy: ${TPOT_PULL_POLICY}
read_only: true
volumes:
- ${TPOT_DATA_PATH}/dicompot/log:/var/log/dicompot
# - ${TPOT_DATA_PATH}/dicompot/images:/opt/dicompot/images
# Dionaea service
dionaea:
container_name: dionaea
stdin_open: true
tty: true
restart: always
depends_on:
logstash:
condition: service_healthy
networks:
- dionaea_local
ports:
- "20:20"
- "21:21"
- "42:42"
- "69:69/udp"
- "81:81"
- "135:135"
# - "443:443"
- "445:445"
- "1433:1433"
- "1723:1723"
- "1883:1883"
- "3306:3306"
# - "5060:5060"
# - "5060:5060/udp"
# - "5061:5061"
- "27017:27017"
image: ${TPOT_REPO}/dionaea:${TPOT_VERSION}
pull_policy: ${TPOT_PULL_POLICY}
read_only: true
volumes:
- ${TPOT_DATA_PATH}/dionaea/roots/ftp:/opt/dionaea/var/dionaea/roots/ftp
- ${TPOT_DATA_PATH}/dionaea/roots/tftp:/opt/dionaea/var/dionaea/roots/tftp
- ${TPOT_DATA_PATH}/dionaea/roots/www:/opt/dionaea/var/dionaea/roots/www
- ${TPOT_DATA_PATH}/dionaea/roots/upnp:/opt/dionaea/var/dionaea/roots/upnp
- ${TPOT_DATA_PATH}/dionaea:/opt/dionaea/var/dionaea
- ${TPOT_DATA_PATH}/dionaea/binaries:/opt/dionaea/var/dionaea/binaries
- ${TPOT_DATA_PATH}/dionaea/log:/opt/dionaea/var/log
- ${TPOT_DATA_PATH}/dionaea/rtp:/opt/dionaea/var/dionaea/rtp
# ElasticPot service
elasticpot:
container_name: elasticpot
restart: always
depends_on:
logstash:
condition: service_healthy
networks:
- elasticpot_local
ports:
- "9200:9200"
image: ${TPOT_REPO}/elasticpot:${TPOT_VERSION}
pull_policy: ${TPOT_PULL_POLICY}
read_only: true
volumes:
- ${TPOT_DATA_PATH}/elasticpot/log:/opt/elasticpot/log
# Heralding service
heralding:
container_name: heralding
restart: always
depends_on:
logstash:
condition: service_healthy
tmpfs:
- /tmp/heralding:uid=2000,gid=2000
networks:
- heralding_local
ports:
# - "21:21"
# - "22:22"
# - "23:23"
# - "25:25"
# - "80:80"
- "110:110"
- "143:143"
# - "443:443"
- "465:465"
- "993:993"
- "995:995"
# - "3306:3306"
# - "3389:3389"
- "1080:1080"
- "5432:5432"
- "5900:5900"
image: ${TPOT_REPO}/heralding:${TPOT_VERSION}
pull_policy: ${TPOT_PULL_POLICY}
read_only: true
volumes:
- ${TPOT_DATA_PATH}/heralding/log:/var/log/heralding
# Honeytrap service
honeytrap:
container_name: honeytrap
restart: always
depends_on:
logstash:
condition: service_healthy
tmpfs:
- /tmp/honeytrap:uid=2000,gid=2000
network_mode: "host"
cap_add:
- NET_ADMIN
image: ${TPOT_REPO}/honeytrap:${TPOT_VERSION}
pull_policy: ${TPOT_PULL_POLICY}
read_only: true
volumes:
- ${TPOT_DATA_PATH}/honeytrap/attacks:/opt/honeytrap/var/attacks
- ${TPOT_DATA_PATH}/honeytrap/downloads:/opt/honeytrap/var/downloads
- ${TPOT_DATA_PATH}/honeytrap/log:/opt/honeytrap/var/log
# Ipphoney service
ipphoney:
container_name: ipphoney
restart: always
depends_on:
logstash:
condition: service_healthy
networks:
- ipphoney_local
ports:
- "631:631"
image: ${TPOT_REPO}/ipphoney:${TPOT_VERSION}
pull_policy: ${TPOT_PULL_POLICY}
read_only: true
volumes:
- ${TPOT_DATA_PATH}/ipphoney/log:/opt/ipphoney/log
# Mailoney service
mailoney:
container_name: mailoney
restart: always
depends_on:
logstash:
condition: service_healthy
environment:
- HPFEEDS_SERVER=
- HPFEEDS_IDENT=user
- HPFEEDS_SECRET=pass
- HPFEEDS_PORT=20000
- HPFEEDS_CHANNELPREFIX=prefix
networks:
- mailoney_local
ports:
- "25:25"
image: ${TPOT_REPO}/mailoney:${TPOT_VERSION}
pull_policy: ${TPOT_PULL_POLICY}
read_only: true
volumes:
- ${TPOT_DATA_PATH}/mailoney/log:/opt/mailoney/logs
# Log4pot service
log4pot:
container_name: log4pot
restart: always
depends_on:
logstash:
condition: service_healthy
tmpfs:
- /tmp:uid=2000,gid=2000
networks:
- log4pot_local
ports:
# - "80:8080"
# - "443:8080"
- "8080:8080"
# - "9200:8080"
- "25565:8080"
image: ${TPOT_REPO}/log4pot:${TPOT_VERSION}
pull_policy: ${TPOT_PULL_POLICY}
read_only: true
volumes:
- ${TPOT_DATA_PATH}/log4pot/log:/var/log/log4pot/log
- ${TPOT_DATA_PATH}/log4pot/payloads:/var/log/log4pot/payloads
# Medpot service
medpot:
container_name: medpot
restart: always
depends_on:
logstash:
condition: service_healthy
networks:
- medpot_local
ports:
- "2575:2575"
image: ${TPOT_REPO}/medpot:${TPOT_VERSION}
pull_policy: ${TPOT_PULL_POLICY}
read_only: true
volumes:
- ${TPOT_DATA_PATH}/medpot/log/:/var/log/medpot
# Redishoneypot service
redishoneypot:
container_name: redishoneypot
restart: always
depends_on:
logstash:
condition: service_healthy
networks:
- redishoneypot_local
ports:
- "6379:6379"
image: ${TPOT_REPO}/redishoneypot:${TPOT_VERSION}
pull_policy: ${TPOT_PULL_POLICY}
read_only: true
volumes:
- ${TPOT_DATA_PATH}/redishoneypot/log:/var/log/redishoneypot
# SentryPeer service
sentrypeer:
container_name: sentrypeer
restart: always
depends_on:
logstash:
condition: service_healthy
environment:
- SENTRYPEER_PEER_TO_PEER=${SENTRYPEER_PEER_TO_PEER:-0} # Default to 0 if unset or NULL (value provided by T-Pot .env)
networks:
- sentrypeer_local
ports:
# - "4222:4222/udp"
- "5060:5060/udp"
# - "127.0.0.1:8082:8082"
image: ${TPOT_REPO}/sentrypeer:${TPOT_VERSION}
pull_policy: ${TPOT_PULL_POLICY}
read_only: true
volumes:
- ${TPOT_DATA_PATH}/sentrypeer/log:/var/log/sentrypeer
#### Snare / Tanner
## Tanner Redis Service
tanner_redis:
container_name: tanner_redis
restart: always
depends_on:
logstash:
condition: service_healthy
tty: true
networks:
- tanner_local
image: ${TPOT_REPO}/redis:${TPOT_VERSION}
pull_policy: ${TPOT_PULL_POLICY}
read_only: true
## PHP Sandbox service
tanner_phpox:
container_name: tanner_phpox
restart: always
depends_on:
logstash:
condition: service_healthy
tty: true
networks:
- tanner_local
image: ${TPOT_REPO}/phpox:${TPOT_VERSION}
pull_policy: ${TPOT_PULL_POLICY}
read_only: true
## Tanner API Service
tanner_api:
container_name: tanner_api
restart: always
depends_on:
- tanner_redis
tmpfs:
- /tmp/tanner:uid=2000,gid=2000
tty: true
networks:
- tanner_local
image: ${TPOT_REPO}/tanner:${TPOT_VERSION}
pull_policy: ${TPOT_PULL_POLICY}
read_only: true
volumes:
- ${TPOT_DATA_PATH}/tanner/log:/var/log/tanner
command: tannerapi
## Tanner Service
tanner:
container_name: tanner
restart: always
depends_on:
- tanner_api
- tanner_phpox
tmpfs:
- /tmp/tanner:uid=2000,gid=2000
tty: true
networks:
- tanner_local
image: ${TPOT_REPO}/tanner:${TPOT_VERSION}
pull_policy: ${TPOT_PULL_POLICY}
command: tanner
read_only: true
volumes:
- ${TPOT_DATA_PATH}/tanner/log:/var/log/tanner
- ${TPOT_DATA_PATH}/tanner/files:/opt/tanner/files
## Snare Service
snare:
container_name: snare
restart: always
depends_on:
- tanner
tty: true
networks:
- tanner_local
ports:
- "80:80"
image: ${TPOT_REPO}/snare:${TPOT_VERSION}
pull_policy: ${TPOT_PULL_POLICY}
##################
#### Tools
##################
#### ELK
## Elasticsearch service
elasticsearch:
container_name: elasticsearch
restart: always
depends_on:
tpotinit:
condition: service_healthy
environment:
- bootstrap.memory_lock=true
- ES_JAVA_OPTS=-Xms2048m -Xmx2048m
- ES_TMPDIR=/tmp
cap_add:
- IPC_LOCK
ulimits:
memlock:
soft: -1
hard: -1
nofile:
soft: 65536
hard: 65536
mem_limit: 4g
ports:
- "127.0.0.1:64298:9200"
image: ${TPOT_REPO}/elasticsearch:${TPOT_VERSION}
pull_policy: ${TPOT_PULL_POLICY}
volumes:
- ${TPOT_DATA_PATH}:/data
## Logstash service
logstash:
container_name: logstash
restart: always
depends_on:
elasticsearch:
condition: service_healthy
environment:
- LS_JAVA_OPTS=-Xms1024m -Xmx1024m
- TPOT_TYPE=${TPOT_TYPE:-HIVE}
- TPOT_HIVE_USER=${TPOT_HIVE_USER}
- TPOT_HIVE_IP=${TPOT_HIVE_IP}
ports:
- "127.0.0.1:64305:64305"
mem_limit: 2g
image: ${TPOT_REPO}/logstash:${TPOT_VERSION}
pull_policy: ${TPOT_PULL_POLICY}
volumes:
- ${TPOT_DATA_PATH}:/data
#### /ELK
# Ewsposter service
ewsposter:
container_name: ewsposter
restart: always
depends_on:
logstash:
condition: service_healthy
networks:
- ewsposter_local
environment:
- EWS_HPFEEDS_ENABLE=false
- EWS_HPFEEDS_HOST=host
- EWS_HPFEEDS_PORT=port
- EWS_HPFEEDS_CHANNELS=channels
- EWS_HPFEEDS_IDENT=user
- EWS_HPFEEDS_SECRET=secret
- EWS_HPFEEDS_TLSCERT=false
- EWS_HPFEEDS_FORMAT=json
image: ${TPOT_REPO}/ewsposter:${TPOT_VERSION}
pull_policy: ${TPOT_PULL_POLICY}
volumes:
- ${TPOT_DATA_PATH}:/data
- ${TPOT_DATA_PATH}/ews/conf/ews.ip:/opt/ewsposter/ews.ip
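Because mobile.yml binds a long list of low ports (22, 23, 25, 80, 443, ...), it can help to enumerate the host-side bindings before deploying. A minimal sketch with PyYAML (already a dependency of customizer.py), assuming it is run from the repository root:

import yaml

# List every host-side port binding declared in compose/mobile.yml.
with open("compose/mobile.yml") as f:
    compose = yaml.safe_load(f)

for name, svc in compose.get("services", {}).items():
    for mapping in svc.get("ports", []):
        host_part = mapping.rsplit(":", 1)[0]  # strip the container port
        print(f"{name}: {host_part}")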


@@ -1,8 +1,8 @@
# T-Pot: RASPBERRY_SHOWCASE
# T-Pot: MOBILE
# Note: This docker compose file has been adjusted to limit the number of tools, services and honeypots to run
# T-Pot on a Raspberry Pi 4 (8GB of RAM, SSD).
# T-Pot on a Raspberry Pi 4 (8GB of RAM).
# The standard docker compose file should work mostly fine (depending on traffic) if you do not enable a
# desktop environment such as LXDE and meet the minimum requirements of 8GB and a SSD.
# desktop environment such as LXDE and meet the minimum requirements of 8GB RAM.
version: '3.9'
networks:
@@ -433,11 +433,8 @@ services:
depends_on:
logstash:
condition: service_healthy
# SentryPeer offers to exchange bad actor data via DHT / P2P mode by setting the ENV to true (1)
# In some cases (i.e. internally deployed T-Pots) this might be confusing as SentryPeer will show
# the bad actors in its logs. Therefore this option is opt-in based.
# environment:
# - SENTRYPEER_PEER_TO_PEER=0
environment:
- SENTRYPEER_PEER_TO_PEER=${SENTRYPEER_PEER_TO_PEER:-0} # Default to 0 if unset or NULL (value provided by T-Pot .env)
networks:
- sentrypeer_local
ports:
@@ -574,6 +571,11 @@ services:
condition: service_healthy
environment:
- LS_JAVA_OPTS=-Xms1024m -Xmx1024m
- TPOT_TYPE=${TPOT_TYPE:-HIVE}
- TPOT_HIVE_USER=${TPOT_HIVE_USER}
- TPOT_HIVE_IP=${TPOT_HIVE_IP}
ports:
- "127.0.0.1:64305:64305"
mem_limit: 2g
image: ${TPOT_REPO}/logstash:${TPOT_VERSION}
pull_policy: ${TPOT_PULL_POLICY}


@@ -473,11 +473,8 @@ services:
depends_on:
tpotinit:
condition: service_healthy
# SentryPeer offers to exchange bad actor data via DHT / P2P mode by setting the ENV to true (1)
# In some cases (i.e. internally deployed T-Pots) this might be confusing as SentryPeer will show
# the bad actors in its logs. Therefore this option is opt-in based.
# environment:
# - SENTRYPEER_PEER_TO_PEER=0
environment:
- SENTRYPEER_PEER_TO_PEER=${SENTRYPEER_PEER_TO_PEER:-0} # Default to 0 if unset or NULL (value provided by T-Pot .env)
networks:
- sentrypeer_local
ports:
@@ -615,9 +612,8 @@ services:
tpotinit:
condition: service_healthy
environment:
# For ET Pro ruleset replace "OPEN" with your OINKCODE
- OINKCODE=OPEN
# Loading externel Rules from URL
- OINKCODE=${OINKCODE:-OPEN} # Default to OPEN if unset or NULL (value provided by T-Pot .env)
# Loading external Rules from URL
# - FROMURL="https://username:password@yoururl.com|https://username:password@otherurl.com"
network_mode: "host"
cap_add:
@@ -645,8 +641,11 @@ services:
condition: service_healthy
environment:
- LS_JAVA_OPTS=-Xms1024m -Xmx1024m
env_file:
- .env
- TPOT_TYPE=${TPOT_TYPE:-HIVE}
- TPOT_HIVE_USER=${TPOT_HIVE_USER}
- TPOT_HIVE_IP=${TPOT_HIVE_IP}
ports:
- "127.0.0.1:64305:64305"
mem_limit: 2g
image: ${TPOT_REPO}/logstash:${TPOT_VERSION}
pull_policy: ${TPOT_PULL_POLICY}


@@ -474,11 +474,8 @@ services:
depends_on:
tpotinit:
condition: service_healthy
# SentryPeer offers to exchange bad actor data via DHT / P2P mode by setting the ENV to true (1)
# In some cases (i.e. internally deployed T-Pots) this might be confusing as SentryPeer will show
# the bad actors in its logs. Therefore this option is opt-in based.
# environment:
# - SENTRYPEER_PEER_TO_PEER=0
environment:
- SENTRYPEER_PEER_TO_PEER=${SENTRYPEER_PEER_TO_PEER:-0} # Default to 0 if unset or NULL (value provided by T-Pot .env)
networks:
- sentrypeer_local
ports:
@@ -616,9 +613,8 @@ services:
tpotinit:
condition: service_healthy
environment:
# For ET Pro ruleset replace "OPEN" with your OINKCODE
- OINKCODE=OPEN
# Loading externel Rules from URL
- OINKCODE=${OINKCODE:-OPEN} # Default to OPEN if unset or NULL (value provided by T-Pot .env)
# Loading external Rules from URL
# - FROMURL="https://username:password@yoururl.com|https://username:password@otherurl.com"
network_mode: "host"
cap_add:
@@ -686,6 +682,9 @@ services:
condition: service_healthy
environment:
- LS_JAVA_OPTS=-Xms1024m -Xmx1024m
- TPOT_TYPE=${TPOT_TYPE:-HIVE}
- TPOT_HIVE_USER=${TPOT_HIVE_USER}
- TPOT_HIVE_IP=${TPOT_HIVE_IP}
ports:
- "127.0.0.1:64305:64305"
mem_limit: 2g


@@ -1,5 +1,6 @@
# T-Pot: Docker Services Base Configuration
# This is only to be in use with the T-Pot Services Builder
# This is only to be used with the T-Pot Customizer
# Editing the contents may result in broken custom configurations!
networks:
adbhoney_local:
@@ -602,11 +603,8 @@ services:
depends_on:
tpotinit:
condition: service_healthy
# SentryPeer offers to exchange bad actor data via DHT / P2P mode by setting the ENV to true (1)
# In some cases (i.e. internally deployed T-Pots) this might be confusing as SentryPeer will show
# the bad actors in its logs. Therefore this option is opt-in based.
# environment:
# - SENTRYPEER_PEER_TO_PEER=0
environment:
- SENTRYPEER_PEER_TO_PEER=${SENTRYPEER_PEER_TO_PEER:-0} # Default to 0 if unset or NULL (value provided by T-Pot .env)
networks:
- sentrypeer_local
ports:
@@ -761,9 +759,8 @@ services:
tpotinit:
condition: service_healthy
environment:
# For ET Pro ruleset replace "OPEN" with your OINKCODE
- OINKCODE=OPEN
# Loading externel Rules from URL
- OINKCODE=${OINKCODE:-OPEN} # Default to OPEN if unset or NULL (value provided by T-Pot .env)
# Loading external Rules from URL
# - FROMURL="https://username:password@yoururl.com|https://username:password@otherurl.com"
network_mode: "host"
cap_add:
@@ -831,6 +828,9 @@ services:
condition: service_healthy
environment:
- LS_JAVA_OPTS=-Xms1024m -Xmx1024m
- TPOT_TYPE=${TPOT_TYPE:-HIVE}
- TPOT_HIVE_USER=${TPOT_HIVE_USER}
- TPOT_HIVE_IP=${TPOT_HIVE_IP}
ports:
- "127.0.0.1:64305:64305"
mem_limit: 2g


@@ -474,11 +474,8 @@ services:
depends_on:
tpotinit:
condition: service_healthy
# SentryPeer offers to exchange bad actor data via DHT / P2P mode by setting the ENV to true (1)
# In some cases (i.e. internally deployed T-Pots) this might be confusing as SentryPeer will show
# the bad actors in its logs. Therefore this option is opt-in based.
# environment:
# - SENTRYPEER_PEER_TO_PEER=0
environment:
- SENTRYPEER_PEER_TO_PEER=${SENTRYPEER_PEER_TO_PEER:-0} # Default to 0 if unset or NULL (value provided by T-Pot .env)
networks:
- sentrypeer_local
ports:
@@ -616,9 +613,8 @@ services:
tpotinit:
condition: service_healthy
environment:
# For ET Pro ruleset replace "OPEN" with your OINKCODE
- OINKCODE=OPEN
# Loading externel Rules from URL
- OINKCODE=${OINKCODE:-OPEN} # Default to OPEN if unset or NULL (value provided by T-Pot .env)
# Loading external Rules from URL
# - FROMURL="https://username:password@yoururl.com|https://username:password@otherurl.com"
network_mode: "host"
cap_add:
@@ -686,6 +682,9 @@ services:
condition: service_healthy
environment:
- LS_JAVA_OPTS=-Xms1024m -Xmx1024m
- TPOT_TYPE=${TPOT_TYPE:-HIVE}
- TPOT_HIVE_USER=${TPOT_HIVE_USER}
- TPOT_HIVE_IP=${TPOT_HIVE_IP}
ports:
- "127.0.0.1:64305:64305"
mem_limit: 2g


@@ -317,6 +317,14 @@ fuTANNER () {
chown tpot:tpot -R /data/tanner
}
# Let's create a function to clean up and prepare wordpot data
fuWORDPOT () {
if [ "$myPERSISTENCE" != "on" ]; then rm -rf /data/wordpot/log; fi
mkdir -p /data/wordpot/log
chmod 770 /data/wordpot -R
chown tpot:tpot /data/wordpot -R
}
# Avoid unwanted cleaning
if [ "$myPERSISTENCE" = "" ];
then
@@ -369,4 +377,5 @@ if [ "$myPERSISTENCE" = "on" ];
fuSURICATA
fuP0F
fuTANNER
fuWORDPOT
fi

env.example

@@ -1,5 +1,9 @@
# T-Pot config file. Do not remove.
###############################################
# T-Pot Base Settings - Adjust to your needs. #
###############################################
# Set the web username and password here; they will be used to create the Nginx password file nginxpasswd.
# Use 'htpasswd -n <username>' to create the WEB_USER if you want to manually deploy T-Pot
# Example: 'htpasswd -n tsec' will print tsec:$apr1$TdJGdsss$6yLsxPmOcXb2kaEZ7lKva0
@@ -57,10 +61,33 @@ TPOT_ATTACKMAP_TEXT=ENABLED
# Examples: America/New_York, Asia/Taipei, Australia/Melbourne, Europe/Athens, Europe/Berlin
TPOT_ATTACKMAP_TEXT_TIMEZONE=UTC
###################################################################################
# Honeypots / Tools settings
###################################################################################
# Some services / tools offer adjustments via ENVs, which can be set here.
###################################################################################
# SentryPeer P2P mode
# Exchange bad actor data via DHT / P2P mode by setting the ENV to true (1)
# In some cases (e.g. internally deployed T-Pots) this might be confusing, as SentryPeer will show
# the bad actors in its logs. Therefore this option is opt-in.
# 0: This is the default, P2P mode is disabled.
# 1: Enable P2P mode.
SENTRYPEER_PEER_TO_PEER=0
# Suricata ET Pro ruleset
# OPEN: This is the default and will use the ET Open ruleset
# OINKCODE: Replace OPEN with your Oinkcode to use the ET Pro ruleset
OINKCODE=OPEN
###################################################################################
# NEVER MAKE CHANGES TO THIS SECTION UNLESS YOU REALLY KNOW WHAT YOU ARE DOING!!! #
###################################################################################
# T-Pot Landing page provides Cockpit Link
COCKPIT=false
# docker.sock Path
TPOT_DOCKER_SOCK=/var/run/docker.sock