# T-Pot: STANDARD
version: '3.9'

networks:
  adbhoney_local:
  ciscoasa_local:
  citrixhoneypot_local:
  conpot_local_IEC104:
  conpot_local_guardian_ast:
  conpot_local_ipmi:
  conpot_local_kamstrup_382:
  cowrie_local:
  ddospot_local:
  dicompot_local:
  dionaea_local:
  elasticpot_local:
  heralding_local:
  ipphoney_local:
  mailoney_local:
  medpot_local:
  redishoneypot_local:
  sentrypeer_local:
  tanner_local:
  spiderfoot_local:
  wordpot_local:
  ewsposter_local:

services:

#########################################
#### DEV
#########################################
#### T-Pot Init - Never delete this!
#########################################

# T-Pot Init Service
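# tpotinit bootstraps the T-Pot environment (e.g. the /data tree and the
# blackhole rules mounted below); every other service waits for its health
# check via depends_on / condition: service_healthy.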
  tpotinit:
    container_name: tpotinit
    env_file:
      - .env
    restart: always
    stop_grace_period: 60s
    tmpfs:
      - /tmp/etc:uid=2000,gid=2000
      - /tmp/:uid=2000,gid=2000
    network_mode: "host"
    cap_add:
      - NET_ADMIN
    image: ${TPOT_REPO}/tpotinit:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    volumes:
      - ${TPOT_DOCKER_COMPOSE}:/tmp/tpot/docker-compose.yml:ro
      - ${TPOT_DATA_PATH}/blackhole:/etc/blackhole
      - ${TPOT_DATA_PATH}:/data
      - /var/run/docker.sock:/var/run/docker.sock:ro


##################
#### Honeypots
##################

# Adbhoney service
  adbhoney:
    container_name: adbhoney
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    networks:
      - adbhoney_local
    ports:
      - "5555:5555"
    image: ${TPOT_REPO}/adbhoney:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/adbhoney/log:/opt/adbhoney/log
      - ${TPOT_DATA_PATH}/adbhoney/downloads:/opt/adbhoney/dl

# Ciscoasa service
  ciscoasa:
    container_name: ciscoasa
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    tmpfs:
      - /tmp/ciscoasa:uid=2000,gid=2000
    networks:
      - ciscoasa_local
    ports:
      - "5000:5000/udp"
      - "8443:8443"
    image: ${TPOT_REPO}/ciscoasa:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/ciscoasa/log:/var/log/ciscoasa

# CitrixHoneypot service
  citrixhoneypot:
    container_name: citrixhoneypot
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    networks:
      - citrixhoneypot_local
    ports:
      - "443:443"
    image: ${TPOT_REPO}/citrixhoneypot:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/citrixhoneypot/log:/opt/citrixhoneypot/logs

# Conpot IEC104 service
  conpot_IEC104:
    container_name: conpot_iec104
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    environment:
      - CONPOT_CONFIG=/etc/conpot/conpot.cfg
      - CONPOT_JSON_LOG=/var/log/conpot/conpot_IEC104.json
      - CONPOT_LOG=/var/log/conpot/conpot_IEC104.log
      - CONPOT_TEMPLATE=IEC104
      - CONPOT_TMP=/tmp/conpot
    tmpfs:
      - /tmp/conpot:uid=2000,gid=2000
    networks:
      - conpot_local_IEC104
    ports:
      - "161:161/udp"
      - "2404:2404"
    image: ${TPOT_REPO}/conpot:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/conpot/log:/var/log/conpot

# Conpot guardian_ast service
  conpot_guardian_ast:
    container_name: conpot_guardian_ast
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    environment:
      - CONPOT_CONFIG=/etc/conpot/conpot.cfg
      - CONPOT_JSON_LOG=/var/log/conpot/conpot_guardian_ast.json
      - CONPOT_LOG=/var/log/conpot/conpot_guardian_ast.log
      - CONPOT_TEMPLATE=guardian_ast
      - CONPOT_TMP=/tmp/conpot
    tmpfs:
      - /tmp/conpot:uid=2000,gid=2000
    networks:
      - conpot_local_guardian_ast
    ports:
      - "10001:10001"
    image: ${TPOT_REPO}/conpot:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/conpot/log:/var/log/conpot

# Conpot ipmi
  conpot_ipmi:
    container_name: conpot_ipmi
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    environment:
      - CONPOT_CONFIG=/etc/conpot/conpot.cfg
      - CONPOT_JSON_LOG=/var/log/conpot/conpot_ipmi.json
      - CONPOT_LOG=/var/log/conpot/conpot_ipmi.log
      - CONPOT_TEMPLATE=ipmi
      - CONPOT_TMP=/tmp/conpot
    tmpfs:
      - /tmp/conpot:uid=2000,gid=2000
    networks:
      - conpot_local_ipmi
    ports:
      - "623:623/udp"
    image: ${TPOT_REPO}/conpot:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/conpot/log:/var/log/conpot

# Conpot kamstrup_382
  conpot_kamstrup_382:
    container_name: conpot_kamstrup_382
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    environment:
      - CONPOT_CONFIG=/etc/conpot/conpot.cfg
      - CONPOT_JSON_LOG=/var/log/conpot/conpot_kamstrup_382.json
      - CONPOT_LOG=/var/log/conpot/conpot_kamstrup_382.log
      - CONPOT_TEMPLATE=kamstrup_382
      - CONPOT_TMP=/tmp/conpot
    tmpfs:
      - /tmp/conpot:uid=2000,gid=2000
    networks:
      - conpot_local_kamstrup_382
    ports:
      - "1025:1025"
      - "50100:50100"
    image: ${TPOT_REPO}/conpot:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/conpot/log:/var/log/conpot

# Cowrie service
  cowrie:
    container_name: cowrie
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    tmpfs:
      - /tmp/cowrie:uid=2000,gid=2000
      - /tmp/cowrie/data:uid=2000,gid=2000
    networks:
      - cowrie_local
    ports:
      - "22:22"
      - "23:23"
    image: ${TPOT_REPO}/cowrie:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/cowrie/downloads:/home/cowrie/cowrie/dl
      - ${TPOT_DATA_PATH}/cowrie/keys:/home/cowrie/cowrie/etc
      - ${TPOT_DATA_PATH}/cowrie/log:/home/cowrie/cowrie/log
      - ${TPOT_DATA_PATH}/cowrie/log/tty:/home/cowrie/cowrie/log/tty

# Ddospot service
  ddospot:
    container_name: ddospot
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    networks:
      - ddospot_local
    ports:
      - "19:19/udp"
      - "53:53/udp"
      - "123:123/udp"
      # - "161:161/udp"
      - "1900:1900/udp"
    image: ${TPOT_REPO}/ddospot:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/ddospot/log:/opt/ddospot/ddospot/logs
      - ${TPOT_DATA_PATH}/ddospot/bl:/opt/ddospot/ddospot/bl
      - ${TPOT_DATA_PATH}/ddospot/db:/opt/ddospot/ddospot/db

# Dicompot service
# Get the Horos Client for testing: https://horosproject.org/
# Get DICOM images (CC BY 3.0): https://www.cancerimagingarchive.net/collections/
# Put images (which must be in DICOM .dcm format, otherwise it will not work!) into /data/dicompot/images
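# Illustrative example (paths are placeholders) for adding a DICOM image set:
#   mkdir -p /data/dicompot/images && cp /path/to/images/*.dcm /data/dicompot/images/
# then uncomment the images volume mount below.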
  dicompot:
    container_name: dicompot
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    networks:
      - dicompot_local
    ports:
      - "11112:11112"
    image: ${TPOT_REPO}/dicompot:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/dicompot/log:/var/log/dicompot
      # - ${TPOT_DATA_PATH}/dicompot/images:/opt/dicompot/images

# Dionaea service
  dionaea:
    container_name: dionaea
    stdin_open: true
    tty: true
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    networks:
      - dionaea_local
    ports:
      - "20:20"
      - "21:21"
      - "42:42"
      - "69:69/udp"
      - "81:81"
      - "135:135"
      # - "443:443"
      - "445:445"
      - "1433:1433"
      - "1723:1723"
      - "1883:1883"
      - "3306:3306"
      # - "5060:5060"
      # - "5060:5060/udp"
      # - "5061:5061"
      - "27017:27017"
    image: ${TPOT_REPO}/dionaea:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/dionaea/roots/ftp:/opt/dionaea/var/dionaea/roots/ftp
      - ${TPOT_DATA_PATH}/dionaea/roots/tftp:/opt/dionaea/var/dionaea/roots/tftp
      - ${TPOT_DATA_PATH}/dionaea/roots/www:/opt/dionaea/var/dionaea/roots/www
      - ${TPOT_DATA_PATH}/dionaea/roots/upnp:/opt/dionaea/var/dionaea/roots/upnp
      - ${TPOT_DATA_PATH}/dionaea:/opt/dionaea/var/dionaea
      - ${TPOT_DATA_PATH}/dionaea/binaries:/opt/dionaea/var/dionaea/binaries
      - ${TPOT_DATA_PATH}/dionaea/log:/opt/dionaea/var/log
      - ${TPOT_DATA_PATH}/dionaea/rtp:/opt/dionaea/var/dionaea/rtp

# ElasticPot service
  elasticpot:
    container_name: elasticpot
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    networks:
      - elasticpot_local
    ports:
      - "9200:9200"
    image: ${TPOT_REPO}/elasticpot:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/elasticpot/log:/opt/elasticpot/log

# Heralding service
  heralding:
    container_name: heralding
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    tmpfs:
      - /tmp/heralding:uid=2000,gid=2000
    networks:
      - heralding_local
    ports:
      # - "21:21"
      # - "22:22"
      # - "23:23"
      # - "25:25"
      # - "80:80"
      - "110:110"
      - "143:143"
      # - "443:443"
      - "465:465"
      - "993:993"
      - "995:995"
      # - "3306:3306"
      # - "3389:3389"
      - "1080:1080"
      - "5432:5432"
      - "5900:5900"
    image: ${TPOT_REPO}/heralding:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/heralding/log:/var/log/heralding

# Honeytrap service
  honeytrap:
    container_name: honeytrap
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    tmpfs:
      - /tmp/honeytrap:uid=2000,gid=2000
    network_mode: "host"
    cap_add:
      - NET_ADMIN
    image: ${TPOT_REPO}/honeytrap:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/honeytrap/attacks:/opt/honeytrap/var/attacks
      - ${TPOT_DATA_PATH}/honeytrap/downloads:/opt/honeytrap/var/downloads
      - ${TPOT_DATA_PATH}/honeytrap/log:/opt/honeytrap/var/log

# Ipphoney service
  ipphoney:
    container_name: ipphoney
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    networks:
      - ipphoney_local
    ports:
      - "631:631"
    image: ${TPOT_REPO}/ipphoney:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/ipphoney/log:/opt/ipphoney/log

# Mailoney service
  mailoney:
    container_name: mailoney
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    environment:
      - HPFEEDS_SERVER=
      - HPFEEDS_IDENT=user
      - HPFEEDS_SECRET=pass
      - HPFEEDS_PORT=20000
      - HPFEEDS_CHANNELPREFIX=prefix
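      # The HPFEEDS_* values above are placeholders; point HPFEEDS_SERVER at a real
      # hpfeeds broker (and adjust ident/secret/port/channel) to enable event export.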
    networks:
      - mailoney_local
    ports:
      - "25:25"
      - "587:25"
    image: ${TPOT_REPO}/mailoney:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/mailoney/log:/opt/mailoney/logs

# Medpot service
  medpot:
    container_name: medpot
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    networks:
      - medpot_local
    ports:
      - "2575:2575"
    image: ${TPOT_REPO}/medpot:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/medpot/log/:/var/log/medpot

# Redishoneypot service
  redishoneypot:
    container_name: redishoneypot
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    networks:
      - redishoneypot_local
    ports:
      - "6379:6379"
    image: ${TPOT_REPO}/redishoneypot:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/redishoneypot/log:/var/log/redishoneypot

# SentryPeer service
  sentrypeer:
    container_name: sentrypeer
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    # environment:
    #   - SENTRYPEER_PEER_TO_PEER=1
    networks:
      - sentrypeer_local
    ports:
      # - "4222:4222/udp"
      - "5060:5060/tcp"
      - "5060:5060/udp"
      # - "127.0.0.1:8082:8082"
    image: ${TPOT_REPO}/sentrypeer:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/sentrypeer/log:/var/log/sentrypeer

#### Snare / Tanner
## Tanner Redis Service
  tanner_redis:
    container_name: tanner_redis
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    tty: true
    networks:
      - tanner_local
    image: ${TPOT_REPO}/redis:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true

## PHP Sandbox service
  tanner_phpox:
    container_name: tanner_phpox
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    tty: true
    networks:
      - tanner_local
    image: ${TPOT_REPO}/phpox:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true

## Tanner API Service
  tanner_api:
    container_name: tanner_api
    restart: always
    depends_on:
      - tanner_redis
    tmpfs:
      - /tmp/tanner:uid=2000,gid=2000
    tty: true
    networks:
      - tanner_local
    image: ${TPOT_REPO}/tanner:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/tanner/log:/var/log/tanner
    command: tannerapi

## Tanner Service
  tanner:
    container_name: tanner
    restart: always
    depends_on:
      - tanner_api
      - tanner_phpox
    tmpfs:
      - /tmp/tanner:uid=2000,gid=2000
    tty: true
    networks:
      - tanner_local
    image: ${TPOT_REPO}/tanner:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    command: tanner
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/tanner/log:/var/log/tanner
      - ${TPOT_DATA_PATH}/tanner/files:/opt/tanner/files

## Snare Service
  snare:
    container_name: snare
    restart: always
    depends_on:
      - tanner
    tty: true
    networks:
      - tanner_local
    ports:
      - "80:80"
    image: ${TPOT_REPO}/snare:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}

# Wordpot service
  wordpot:
    container_name: wordpot
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    networks:
      - wordpot_local
    ports:
      - "8080:80"
    image: ${TPOT_REPO}/wordpot:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/wordpot/log:/opt/wordpot/logs/

##################
#### NSM
##################
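# The NSM services below use network_mode: "host" so they can observe traffic on
# the host's interfaces directly; fatt and suricata additionally request the
# NET_ADMIN, SYS_NICE and NET_RAW capabilities.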

# Fatt service
  fatt:
    container_name: fatt
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    network_mode: "host"
    cap_add:
      - NET_ADMIN
      - SYS_NICE
      - NET_RAW
    image: ${TPOT_REPO}/fatt:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    volumes:
      - ${TPOT_DATA_PATH}/fatt/log:/opt/fatt/log

# P0f service
  p0f:
    container_name: p0f
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    network_mode: "host"
    image: ${TPOT_REPO}/p0f:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/p0f/log:/var/log/p0f

# Suricata service
  suricata:
    container_name: suricata
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    environment:
      - OINKCODE=${OINKCODE:-OPEN} # Default to OPEN if unset or NULL (value provided by T-Pot .env)
      # Loading external Rules from URL
      # - FROMURL="https://username:password@yoururl.com|https://username:password@otherurl.com"
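      # To switch from the default ET Open rules to ET Pro, set your oinkcode in
      # T-Pot's .env, e.g. (placeholder value): OINKCODE=0123456789abcdef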
    network_mode: "host"
    cap_add:
      - NET_ADMIN
      - SYS_NICE
      - NET_RAW
    image: ${TPOT_REPO}/suricata:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    volumes:
      - ${TPOT_DATA_PATH}/suricata/log:/var/log/suricata


##################
#### Tools
##################

#### Wazuh
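# The Wazuh stack (indexer, dashboard, manager) provides SIEM-style log analysis;
# unlike the honeypots above, its images are pulled from Docker Hub (wazuh/*)
# rather than ${TPOT_REPO}.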
## Wazuh Indexer service
  wazuh.indexer:
    container_name: wazuh.indexer
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    environment:
      - "OPENSEARCH_JAVA_OPTS=-Xms1g -Xmx1g"
      - "bootstrap.memory_lock=true"
      - "NODE_NAME=wazuh.indexer"
      - "CLUSTER_INITIAL_MASTER_NODES=wazuh.indexer"
      - "CLUSTER_NAME=wazuh-cluster"
      - "PATH_DATA=/var/lib/wazuh-indexer"
      - "PATH_LOGS=/var/log/wazuh-indexer"
      - "HTTP_PORT=9200-9299"
      - "TRANSPORT_TCP_PORT=9300-9399"
      - "COMPATIBILITY_OVERRIDE_MAIN_RESPONSE_VERSION=true"
      - "PLUGINS_SECURITY_SSL_HTTP_PEMCERT_FILEPATH=/usr/share/wazuh-indexer/certs/wazuh.indexer.pem"
      - "PLUGINS_SECURITY_SSL_HTTP_PEMKEY_FILEPATH=/usr/share/wazuh-indexer/certs/wazuh.indexer.key"
      - "PLUGINS_SECURITY_SSL_HTTP_PEMTRUSTEDCAS_FILEPATH=/usr/share/wazuh-indexer/certs/root-ca.pem"
      - "PLUGINS_SECURITY_SSL_TRANSPORT_PEMCERT_FILEPATH=/usr/share/wazuh-indexer/certs/wazuh.indexer.pem"
      - "PLUGINS_SECURITY_SSL_TRANSPORT_PEMKEY_FILEPATH=/usr/share/wazuh-indexer/certs/wazuh.indexer.key"
      - "PLUGINS_SECURITY_SSL_TRANSPORT_PEMTRUSTEDCAS_FILEPATH=/usr/share/wazuh-indexer/certs/root-ca.pem"
      - "PLUGINS_SECURITY_SSL_HTTP_ENABLED=true"
      - "PLUGINS_SECURITY_SSL_TRANSPORT_ENFORCE_HOSTNAME_VERIFICATION=false"
      - "PLUGINS_SECURITY_SSL_TRANSPORT_RESOLVE_HOSTNAME=false"
      - "PLUGINS_SECURITY_AUTHCZ_ADMIN_DN=CN=admin,OU=Wazuh,O=Wazuh,L=California,C=US"
      - "PLUGINS_SECURITY_CHECK_SNAPSHOT_RESTORE_WRITE_PRIVILEGES=true"
      - "PLUGINS_SECURITY_ENABLE_SNAPSHOT_RESTORE_PRIVILEGE=true"
      - "PLUGINS_SECURITY_NODES_DN=CN=wazuh.indexer,OU=Wazuh,O=Wazuh,L=California,C=US"
      - 'PLUGINS_SECURITY_RESTAPI_ROLES_ENABLED=["all_access", "security_rest_api_access"]'
      - "PLUGINS_SECURITY_SYSTEM_INDICES_ENABLED=true"
      - 'PLUGINS_SECURITY_SYSTEM_INDICES_INDICES=[".opendistro-alerting-config", ".opendistro-alerting-alert*", ".opendistro-anomaly-results*", ".opendistro-anomaly-detector*", ".opendistro-anomaly-checkpoints", ".opendistro-anomaly-detection-state", ".opendistro-reports-*", ".opendistro-notifications-*", ".opendistro-notebooks", ".opensearch-observability", ".opendistro-asynchronous-search-response*", ".replication-metadata-store"]'
      - "PLUGINS_SECURITY_ALLOW_DEFAULT_INIT_SECURITYINDEX=true"
      - "CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED=false"
    ulimits:
      memlock:
        soft: -1
        hard: -1
      nofile:
        soft: 65536
        hard: 65536
    ports:
      - "9200:9200"
    image: wazuh/wazuh-indexer:4.7.5
    volumes:
      - wazuh-indexer-data:/var/lib/wazuh-indexer
      - ./config/wazuh_indexer_ssl_certs/root-ca.pem:/usr/share/wazuh-indexer/certs/root-ca.pem
      - ./config/wazuh_indexer_ssl_certs/wazuh.indexer-key.pem:/usr/share/wazuh-indexer/certs/wazuh.indexer.key
      - ./config/wazuh_indexer_ssl_certs/wazuh.indexer.pem:/usr/share/wazuh-indexer/certs/wazuh.indexer.pem
      - ./config/wazuh_indexer_ssl_certs/admin.pem:/usr/share/wazuh-indexer/certs/admin.pem
      - ./config/wazuh_indexer_ssl_certs/admin-key.pem:/usr/share/wazuh-indexer/certs/admin-key.pem
      # If you need to mount a custom opensearch.yml, uncomment the next line and delete the environment variables above
      # - ./config/wazuh_indexer/wazuh.indexer.yml:/usr/share/wazuh-indexer/opensearch.yml

## Wazuh Dashboard service
  wazuh.dashboard:
    container_name: wazuh.dashboard
    restart: always
    depends_on:
      wazuh.indexer:
        condition: service_healthy
    ulimits:
      memlock:
        soft: -1
        hard: -1
      nofile:
        soft: 65536
        hard: 65536
    ports:
      - "443:5601"
    image: wazuh/wazuh-dashboard:4.7.5
    environment:
      - "WAZUH_API_URL=https://wazuh.manager"
      - "DASHBOARD_USERNAME=kibanaserver"
      - "DASHBOARD_PASSWORD=kibanaserver"
      - "API_USERNAME=wazuh-wui"
      - "API_PASSWORD=MyS3cr37P450r.*-"
      - "SERVER_HOST=0.0.0.0"
      - "SERVER_PORT=5601"
      - "OPENSEARCH_HOSTS=https://wazuh.indexer:9200"
      - "OPENSEARCH_SSL_VERIFICATIONMODE=certificate"
      - 'OPENSEARCH_REQUESTHEADERSALLOWLIST=["securitytenant","Authorization"]'
      - "OPENSEARCH_SECURITY_MULTITENANCY_ENABLED=false"
      - "SERVER_SSL_ENABLED=true"
      - 'OPENSEARCH_SECURITY_READONLY_MODE_ROLES=["kibana_read_only"]'
      - "SERVER_SSL_KEY=/usr/share/wazuh-dashboard/certs/wazuh-dashboard-key.pem"
      - "SERVER_SSL_CERTIFICATE=/usr/share/wazuh-dashboard/certs/wazuh-dashboard.pem"
      - 'OPENSEARCH_SSL_CERTIFICATEAUTHORITIES=["/usr/share/wazuh-dashboard/certs/root-ca.pem"]'
      - "UISETTINGS_OVERRIDES_DEFAULTROUTE=/app/wz-home"
    volumes:
      - wazuh-dashboard-config:/usr/share/wazuh-dashboard/data/wazuh/config
      - wazuh-dashboard-custom:/usr/share/wazuh-dashboard/plugins/wazuh/public/assets/custom
      - ./config/wazuh_indexer_ssl_certs/wazuh.dashboard.pem:/usr/share/wazuh-dashboard/certs/wazuh-dashboard.pem
      - ./config/wazuh_indexer_ssl_certs/wazuh.dashboard-key.pem:/usr/share/wazuh-dashboard/certs/wazuh-dashboard-key.pem
      - ./config/wazuh_indexer_ssl_certs/root-ca.pem:/usr/share/wazuh-dashboard/certs/root-ca.pem
      - ./config/wazuh_dashboard/wazuh.yml:/wazuh-config-mount/data/wazuh/config/wazuh.yml
      # If you need to mount a custom opensearch_dashboards.yml, uncomment the next line and delete the environment variables above
      # - ./config/wazuh_dashboard/opensearch_dashboards.yml:/wazuh-config-mount/config/opensearch_dashboards.yml
    links:
      - wazuh.indexer:wazuh.indexer
      - wazuh.manager:wazuh.manager

## Wazuh Manager service
  wazuh.manager:
    container_name: wazuh.manager
    restart: always
    depends_on:
      wazuh.indexer:
        condition: service_healthy
    ulimits:
      memlock:
        soft: -1
        hard: -1
      nofile:
        soft: 655360
        hard: 655360
    ports:
      - "1514:1514"
      - "1515:1515"
      - "514:514/udp"
      - "55000:55000"
    environment:
      - "INDEXER_URL=https://wazuh.indexer:9200"
      - "INDEXER_USERNAME=admin"
      - "INDEXER_PASSWORD=admin"
      - "FILEBEAT_SSL_VERIFICATION_MODE=full"
      - "SSL_CERTIFICATE_AUTHORITIES=/etc/ssl/root-ca.pem"
      - "SSL_CERTIFICATE=/etc/ssl/filebeat.pem"
      - "SSL_KEY=/etc/ssl/filebeat.key"
      - "API_USERNAME=wazuh-wui"
      - "API_PASSWORD=MyS3cr37P450r.*-"
    image: wazuh/wazuh-manager:4.7.5
    volumes:
      - wazuh_api_configuration:/var/ossec/api/configuration
      - wazuh_etc:/var/ossec/etc
      - wazuh_logs:/var/ossec/logs
      - wazuh_queue:/var/ossec/queue
      - wazuh_var_multigroups:/var/ossec/var/multigroups
      - wazuh_integrations:/var/ossec/integrations
      - wazuh_active_response:/var/ossec/active-response/bin
      - wazuh_agentless:/var/ossec/agentless
      - wazuh_wodles:/var/ossec/wodles
      - filebeat_etc:/etc/filebeat
      - filebeat_var:/var/lib/filebeat
      - ./config/wazuh_indexer_ssl_certs/root-ca-manager.pem:/etc/ssl/root-ca.pem
      - ./config/wazuh_indexer_ssl_certs/wazuh.manager.pem:/etc/ssl/filebeat.pem
      - ./config/wazuh_indexer_ssl_certs/wazuh.manager-key.pem:/etc/ssl/filebeat.key
      - ./config/wazuh_cluster/wazuh_manager.conf:/wazuh-config-mount/etc/ossec.conf
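
# The map_redis, map_web and map_data services below drive the live attack map,
# served locally on 127.0.0.1:64299 by map_web.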
## Map Redis Service
  map_redis:
    container_name: map_redis
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    stop_signal: SIGKILL
    tty: true
    image: ${TPOT_REPO}/redis:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true

## Map Web Service
  map_web:
    container_name: map_web
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    environment:
      - MAP_COMMAND=AttackMapServer.py
    stop_signal: SIGKILL
    tty: true
    ports:
      - "127.0.0.1:64299:64299"
    image: ${TPOT_REPO}/map:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}

## Map Data Service
  map_data:
    container_name: map_data
    restart: always
    depends_on:
      # NOTE: no elasticsearch service is defined in this file; this dependency
      # assumes one is provided elsewhere (e.g. by an ELK stack).
      elasticsearch:
        condition: service_healthy
    environment:
      - MAP_COMMAND=DataServer_v2.py
      - TPOT_ATTACKMAP_TEXT=${TPOT_ATTACKMAP_TEXT}
      - TZ=${TPOT_ATTACKMAP_TEXT_TIMEZONE}
    stop_signal: SIGKILL
    tty: true
    image: ${TPOT_REPO}/map:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
#### /ELK

# Ewsposter service
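# ewsposter reads the honeypot logs from /data and can forward events to the
# DTAG community backend or an hpfeeds broker; HPFEEDS export is disabled below
# (EWS_HPFEEDS_ENABLE=false).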
  ewsposter:
    container_name: ewsposter
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    networks:
      - ewsposter_local
    environment:
      - EWS_HPFEEDS_ENABLE=false
      - EWS_HPFEEDS_HOST=host
      - EWS_HPFEEDS_PORT=port
      - EWS_HPFEEDS_CHANNELS=channels
      - EWS_HPFEEDS_IDENT=user
      - EWS_HPFEEDS_SECRET=secret
      - EWS_HPFEEDS_TLSCERT=false
      - EWS_HPFEEDS_FORMAT=json
    image: ${TPOT_REPO}/ewsposter:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    volumes:
      - ${TPOT_DATA_PATH}:/data
      - ${TPOT_DATA_PATH}/ews/conf/ews.ip:/opt/ewsposter/ews.ip

# Nginx service
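# Nginx fronts the T-Pot web interface on port 64297, terminating TLS with the
# certificate and the basic-auth password files mounted below.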
  nginx:
    container_name: nginx
    restart: always
    environment:
      - TPOT_OSTYPE=${TPOT_OSTYPE}
    depends_on:
      tpotinit:
        condition: service_healthy
    tmpfs:
      - /var/tmp/nginx/client_body
      - /var/tmp/nginx/proxy
      - /var/tmp/nginx/fastcgi
      - /var/tmp/nginx/uwsgi
      - /var/tmp/nginx/scgi
      - /run
      - /var/lib/nginx/tmp:uid=100,gid=82
    network_mode: "host"
    ports:
      - "64297:64297"
    image: ${TPOT_REPO}/nginx:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/nginx/cert/:/etc/nginx/cert/:ro
      - ${TPOT_DATA_PATH}/nginx/conf/nginxpasswd:/etc/nginx/nginxpasswd:ro
      - ${TPOT_DATA_PATH}/nginx/conf/lswebpasswd:/etc/nginx/lswebpasswd:ro
      - ${TPOT_DATA_PATH}/nginx/log/:/var/log/nginx/

# Spiderfoot service
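# SpiderFoot is an OSINT automation tool; its web UI is bound to 127.0.0.1:64303
# and is typically reached through the Nginx reverse proxy above.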
  spiderfoot:
    container_name: spiderfoot
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    networks:
      - spiderfoot_local
    ports:
      - "127.0.0.1:64303:8080"
    image: ${TPOT_REPO}/spiderfoot:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    volumes:
      - ${TPOT_DATA_PATH}/spiderfoot:/home/spiderfoot/.spiderfoot

volumes:
  wazuh_api_configuration:
  wazuh_etc:
  wazuh_logs:
  wazuh_queue:
  wazuh_var_multigroups:
  wazuh_integrations:
  wazuh_active_response:
  wazuh_agentless:
  wazuh_wodles:
  filebeat_etc:
  filebeat_var:
  wazuh-indexer-data:
  wazuh-dashboard-config:
  wazuh-dashboard-custom: