Compare commits

..

No commits in common. "master" and "24.04.0" have entirely different histories.

176 changed files with 3724 additions and 4773 deletions

47
.env
View file

@ -44,14 +44,13 @@ TPOT_PERSISTENCE=on
# HIVE: This is the default and offers everything to connect T-Pot sensors.
# SENSOR: This needs to be used when running a sensor. Be aware to adjust all other
# settings as well.
# 1. You will need to copy compose/sensor.yml to ./docker-compose.yml
# 1. You will need to copy compose/sensor.yml to ./docker-comopose.yml
# 2. From HIVE host you will need to copy ~/tpotce/data/nginx/cert/nginx.crt to
# your SENSOR host to ~/tpotce/data/hive.crt
# 3. On HIVE: Create a web user per SENSOR on HIVE and provide credentials below
# Create credentials with 'htpasswd ~/tpotce/data/nginx/conf/lswebpasswd <username>'
# 4. On SENSOR: Provide username / password from (3) for TPOT_HIVE_USER as base64 encoded string:
# "echo -n 'username:password' | base64 -w0"
# MOBILE: This will set the correct type for T-Pot Mobile (https://github.com/telekom-security/tpotmobile)
TPOT_TYPE=HIVE
# T-Pot Hive User (only relevant for SENSOR deployment)
@ -60,18 +59,6 @@ TPOT_TYPE=HIVE
# i.e. TPOT_HIVE_USER='dXNlcm5hbWU6cGFzc3dvcmQ='
TPOT_HIVE_USER=
# Logstash Sensor SSL verfication (only relevant on SENSOR hosts)
# full: This is the default. Logstash, by default, verifies the complete certificate chain for ssl certificates.
# This also includes the FQDN and sANs. By default T-Pot will only generate a self-signed certificate which
# contains a sAN for the HIVE IP. In scenario where the HIVE needs to be accessed via Internet, maybe with
# a different NAT address, a new certificate needs to be generated before deployment that includes all the
# IPs and FQDNs as sANs for logstash successfully establishing a connection to the HIVE for transmitting
# logs. Details here: https://github.com/telekom-security/tpotce?tab=readme-ov-file#distributed-deployment
# none: This setting will disable the ssl verification check of logstash and should only be used in a testing
# environment where IPs often change. It is not recommended for a production environment where trust between
# HIVE and SENSOR is only established through a self signed certificate.
LS_SSL_VERIFICATION=full
# T-Pot Hive IP (only relevant for SENSOR deployment)
# <empty>: This is empty by default.
# <IP, FQDN>: This can be either a IP (i.e. 192.168.1.1) or a FQDN (i.e. foo.bar.local)
@ -100,34 +87,6 @@ TPOT_ATTACKMAP_TEXT_TIMEZONE=UTC
# OINKCODE: Replace OPEN with your Oinkcode to use the ET Pro ruleset
OINKCODE=OPEN
# Beelzebub Honeypot supports LLMs such as ChatGPT and the Ollama backend.
# Beelzebub is not part of the standard edition, please follow the README regarding setup.
# It is recommended to use the Ollama backend to keep costs at bay.
# Remember to rate limit API usage / set budget alerts when using ChatGPT API.
# BEELZEBUB_LLM_MODEL: Set to "ollama" or "gpt4-o".
# BEELZEBUB_LLM_HOST: When using "ollama" set it to the URL of your Ollama backend.
# BEELZEBUB_OLLAMA_MODEL: Set to the model you are serving on your Ollama backend, i.e. "openchat".
# BEELZEBUB_LLM_MODEL: "gpt4-o"
# BEELZEBUB_OPENAISECRETKEY: "sk-proj-123456"
BEELZEBUB_LLM_MODEL: "ollama"
BEELZEBUB_LLM_HOST: "http://ollama.local:11434/api/chat"
BEELZEBUB_OLLAMA_MODEL: "openchat"
# Galah is a LLM-powered web honeypot supporting various LLM backends.
# Galah is not part of the standard edition, please follow the README regarding setup.
# It is recommended to use the Ollama backend to keep costs at bay.
# Remember to rate limit API usage / set budget alerts when using ChatGPT API.
# GALAH_LLM_PROVIDER: Set to "ollama" or "gpt4-o".
# GALAH_LLM_SERVER_URL: When using "ollama" set it to the URL of your Ollama backend.
# GALAH_LLM_MODEL: Set to the model you are serving on your Ollama backend, i.e. "llama3".
# GALAH_LLM_TEMPERATURE: "1"
# GALAH_LLM_API_KEY: "sk-XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
# GALAH_LLM_CLOUD_LOCATION: ""
# GALAH_LLM_CLOUD_PROJECT: ""
GALAH_LLM_PROVIDER: "ollama"
GALAH_LLM_SERVER_URL: "http://ollama.local:11434"
GALAH_LLM_MODEL: "llama3.1"
###################################################################################
# NEVER MAKE CHANGES TO THIS SECTION UNLESS YOU REALLY KNOW WHAT YOU ARE DOING!!! #
@ -146,10 +105,10 @@ TPOT_DOCKER_COMPOSE=./docker-compose.yml
# Depending on where you are located you may choose between DockerHub and GHCR
# dtagdevsec: This will use the DockerHub image registry
# ghcr.io/telekom-security: This will use the GitHub container registry
TPOT_REPO=ghcr.io/telekom-security
TPOT_REPO=dtagdevsec
# T-Pot Version Tag
TPOT_VERSION=24.04.1
TPOT_VERSION=24.04
# T-Pot Pull Policy
# always: (T-Pot default) Compose implementations SHOULD always pull the image from the registry.

View file

@ -13,7 +13,6 @@ Before you post your issue make sure it has not been answered yet and provide **
- 🔍 Use the [search function](https://github.com/dtag-dev-sec/tpotce/issues?utf8=%E2%9C%93&q=) first
- 🧐 Check our [Wiki](https://github.com/dtag-dev-sec/tpotce/wiki) and the [discussions](https://github.com/telekom-security/tpotce/discussions)
- 📚 Consult the documentation of 💻 your Linux OS, 🐳 [Docker](https://docs.docker.com/), the 🦌 [Elastic stack](https://www.elastic.co/guide/index.html) and the 🍯 [T-Pot Readme](https://github.com/dtag-dev-sec/tpotce/blob/master/README.md).
- ⚙️ The [Troubleshoot Section](https://github.com/telekom-security/tpotce?tab=readme-ov-file#troubleshooting) of the [T-Pot Readme](https://github.com/dtag-dev-sec/tpotce/blob/master/README.md) is a good starting point to collect a good set of information for the issue and / or to fix things on your own.
- **⚠️ Provide [BASIC SUPPORT INFORMATION](#-basic-support-information-commands-are-expected-to-run-as-root) or similar detailed information with regard to your issue or we will close the issue or convert it into a discussion without further interaction from the maintainers**.<br>
# ⚠️ Basic support information (commands are expected to run as `root`)
@ -33,7 +32,7 @@ Before you post your issue make sure it has not been answered yet and provide **
- Did you modify any scripts or configs? If yes, please attach the changes.
- Please provide a screenshot of `htop` and `docker stats`.
- How much free disk space is available (`df -h`)?
- What is the current container status (`dps`)?
- What is the current container status (`dps.sh`)?
- On Linux: What is the status of the T-Pot service (`systemctl status tpot`)?
- What ports are being occupied? Stop T-Pot `systemctl stop tpot` and run `grc netstat -tulpen`
- Stop T-Pot `systemctl stop tpot`

View file

@ -13,7 +13,6 @@ Before you post your issue make sure it has not been answered yet and provide **
- 🔍 Use the [search function](https://github.com/dtag-dev-sec/tpotce/issues?utf8=%E2%9C%93&q=) first
- 🧐 Check our [Wiki](https://github.com/dtag-dev-sec/tpotce/wiki) and the [discussions](https://github.com/telekom-security/tpotce/discussions)
- 📚 Consult the documentation of 💻 your Linux OS, 🐳 [Docker](https://docs.docker.com/), the 🦌 [Elastic stack](https://www.elastic.co/guide/index.html) and the 🍯 [T-Pot Readme](https://github.com/dtag-dev-sec/tpotce/blob/master/README.md).
- ⚙️ The [Troubleshoot Section](https://github.com/telekom-security/tpotce?tab=readme-ov-file#troubleshooting) of the [T-Pot Readme](https://github.com/dtag-dev-sec/tpotce/blob/master/README.md) is a good starting point to collect a good set of information for the issue and / or to fix things on your own.
- **⚠️ Provide [BASIC SUPPORT INFORMATION](#-basic-support-information-commands-are-expected-to-run-as-root) or similar detailed information with regard to your issue or we will close the issue or convert it into a discussion without further interaction from the maintainers**.<br>
# ⚠️ Basic support information (commands are expected to run as `root`)
@ -33,7 +32,7 @@ Before you post your issue make sure it has not been answered yet and provide **
- Did you modify any scripts or configs? If yes, please attach the changes.
- Please provide a screenshot of `htop` and `docker stats`.
- How much free disk space is available (`df -h`)?
- What is the current container status (`dps`)?
- What is the current container status (`dps.sh`)?
- On Linux: What is the status of the T-Pot service (`systemctl status tpot`)?
- What ports are being occupied? Stop T-Pot `systemctl stop tpot` and run `grc netstat -tulpen`
- Stop T-Pot `systemctl stop tpot`

View file

@ -1,49 +0,0 @@
name: "Check Basic Support Info"
on:
issues:
types: [opened, edited]
permissions:
issues: write
contents: read
jobs:
check-issue:
runs-on: ubuntu-latest
steps:
- name: Check out the repository
uses: actions/checkout@v4
- name: Install jq
run: sudo apt-get install jq -y
- name: Check issue for basic support info
id: check_issue
run: |
REQUIRED_INFO=("What OS are you T-Pot running on?" "What is the version of the OS" "What T-Pot version are you currently using" "What architecture are you running on" "Review the \`~/install_tpot.log\`" "How long has your installation been running?" "Did you install upgrades, packages or use the update script?" "Did you modify any scripts or configs?" "Please provide a screenshot of \`htop\` and \`docker stats\`." "How much free disk space is available" "What is the current container status" "What is the status of the T-Pot service" "What ports are being occupied?")
ISSUE_BODY=$(cat $GITHUB_EVENT_PATH | jq -r '.issue.body')
MISSING_INFO=()
for info in "${REQUIRED_INFO[@]}"; do
if [[ "$ISSUE_BODY" != *"$info"* ]]; then
MISSING_INFO+=("$info")
fi
done
if [ ${#MISSING_INFO[@]} -ne 0 ]; then
echo "missing=true" >> $GITHUB_ENV
else
echo "missing=false" >> $GITHUB_ENV
fi
- name: Add "no basic support info" label if necessary
if: env.missing == 'true'
run: gh issue edit "$NUMBER" --add-label "$LABELS"
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
GH_REPO: ${{ github.repository }}
NUMBER: ${{ github.event.issue.number }}
LABELS: no basic support info

View file

@ -1,24 +0,0 @@
name: "Tag stale issues and pull requests"
on:
schedule:
- cron: "0 0 * * *" # Runs every day at midnight
workflow_dispatch: # Allows the workflow to be triggered manually
jobs:
stale:
runs-on: ubuntu-latest
steps:
- uses: actions/stale@v7
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
stale-issue-message: "This issue has been marked as stale because it has had no activity for 7 days. If you are still experiencing this issue, please comment or it will be closed in 7 days."
stale-pr-message: "This pull request has been marked as stale because it has had no activity for 7 days. If you are still working on this, please comment or it will be closed in 7 days."
days-before-stale: 7
days-before-close: 7
stale-issue-label: "stale"
exempt-issue-labels: "keep-open"
stale-pr-label: "stale"
exempt-pr-labels: "keep-open"
operations-per-run: 30
debug-only: false

1
.gitignore vendored
View file

@ -1,6 +1,5 @@
# Ignore data folder
data/
_data/
**/.DS_Store
.idea
install_tpot.log

View file

@ -1,46 +1,36 @@
# Release Notes / Changelog
T-Pot 24.04.1 brings significant updates and exciting new honeypot additions, especially the LLM-based honeypots **Beelzebub** and **Galah**!
T-Pot 24.04.0 marks probably the largest change in the history of the project. While most of the changes have been made to the underlying platform some changes will be standing out in particular - a T-Pot ISO image will no longer be provided with the benefit that T-Pot will now run on multiple Linux distributions (Alma Linux, Debian, Fedora, OpenSuse, Raspbian, Rocky Linux, Ubuntu), Raspberry Pi (optimized) and macOS / Windows (limited).
## New Features
* **Beelzebub** (SSH) and **Galah** (HTTP) are the first LLM-based honeypots included in T-Pot (requires Ollama installation or a ChatGPT subscription).
* **Go-Pot** a HTTP tarpit designed to maximize bot misery by slowly feeding them an infinite stream of fake secrets.
* **Honeyaml** a configurable API server honeypot even supporting JWT-based HTTP bearer/token authentication.
* **H0neytr4p** a HTTP/S honeypot capable of emulating vulnerabilities using configurable traps.
* **Miniprint** a medium-interaction printer honeypot.
* **Distributed** Installation is now using NGINX reverse proxy instead of SSH to transmit **HIVE_SENSOR** logs to **HIVE**
* **`deploy.sh`**, will make the deployment of sensor much easier and will automatically take care of the configuration. You only have to install the T-Pot sensor.
* **T-Pot Init** is the foundation for running T-Pot on multiple Linux distributions and will also ensure to restart containers with failed healthchecks using **autoheal**
* **T-Pot Installer** is now mostly Ansible based providing a universal playbook for the most common Linux distributions
* **T-Pot Uninstaller** allows to uninstall T-Pot, while not recommended for general usage, this comes in handy for testing purposes
* **T-Pot Customizer (`compose/customizer.py`)** is here to assist you in the creation of a customized `docker-compose.yml`
* **T-Pot Landing Page** has been redesigned and simplified
![T-Pot-WebUI](doc/tpotwebui.png)
* **Kibana Dashboards, Objects** fully refreshed in favor of Lens based objects
![Dashbaord](doc/kibana_a.png)
* **Wordpot** is added as new addition to the available honeypots within T-Pot and will run on `tcp/8080` by default.
* **Raspberry Pi** is now supported using a dedicated `mobile.yml` (why this is called mobile will be revealed soon!)
* **GeoIP Attack Map** is now aware of connects / disconnects and thus eliminating required reloads
* **Docker**, where possible, will now be installed directly from the Docker repositories to avoid any incompatibilities
* **`.env`** now provides a single configuration file for the T-Pot related settings
* **`genuser.sh`** can now be used to add new users to the T-Pot Landing Page as part of the T-Pot configuration file (`.env`)
## Updates
* **Honeypots** were updated to their latest pushed code and / or releases.
* **Editions** have been re-introduced. You can now additionally choose to install T-Pot as **Mini**, **LLM** and **Tarpit** edition.
* **Attack Map** has been updated to 2.2.6 including support for all new honeypots.
* **Elastic Stack** has been upgrade to 8.16.1.
* **Cyberchef** has been updated to the latest release.
* **Elasticvue** has been updated to 1.1.0.
* **Suricata** has been updated to 7.0.7, now supporting JA4 hashes.
* Most honeypots now use **PyInstaller** (for Python) and **Scratch** (for Go) to minimize Docker image sizes.
* All new honeypots have been integrated with **Kibana**, featuring dedicated dashboards and visualizations.
* **Github Container Registry** is now the default container registry for the T-Pot configuration file `.env`.
* Compatibility tested with **Alma 9.5**, **Fedora 41**, **Rocky 9.5**, and **Ubuntu 24.04.1**, with updated supported ISO links.
* Docker images now use **Alpine 3.20** or **Scratch** wherever possible.
* Updates for `24.04.1` images will be provided continuously through Docker image updates.
* **Ddospot** has been moved from the Hive / Sensor installation to the Tarpit installation.
* **Honeypots** and **tools** were updated to their latest pushed code and / or releases
* Where possible Docker Images will now use Alpine 3.19
* Updates will be provided continuously through Docker Images updates
## Breaking Changes
### NGINX
- The container no longer runs in host mode, requiring changes to the `docker-compose.yml` and related services.
- To avoid confusion and downtime, the `24.04.1` tag for Docker images has been introduced.
- **Important**: Actively update T-Pot as described in the [README](https://github.com/telekom-security/tpotce/blob/master/README.md).
- **Deprecation Notice**: The `24.04` tagged images will no longer be maintained and will be removed by **2025-01-31**.
* There is no option to migrate a previous installation to T-Pot 24.04.0, you can try to transfer the old `data` folder to the new T-Pot installation, but a working environment depends on too many other factors outside of our control and a new installation is simply faster.
* Most of the support scripts were moved into the **T-Pot Init** image and are no longer available directly on the host.
* Cockpit is no longer available as part of T-Pot itself. However, where supported, you can simply install the `cockpit` package.
### Suricata
- Capture filters have been updated to exclude broadcast, multicast, NetBIOS, IGMP, and MDNS traffic.
# Thanks & Credits
* @sp3t3rs, @trixam, for their backend and ews support!
* @shark4ce for taking the time to test, debug and offer a solution #1472.
## Thanks & Credits
A heartfelt thank you to the contributors who made this release possible:
* @elivlo, @mancasa, koalafiedTroll, @trixam, for their backend and ews support!
* @mariocandela for his work and updates on Beelzebub based on our discussions!
* @ryanolee for approaching us and adding valuable features to go-pot based on our discussions!
* @neon-ninja for the work on #1661!
* @sarkoziadam for the work on #1643!
* @glaslos for the work on #1538!
… and to the entire T-Pot community for opening issues, sharing ideas, and helping improve T-Pot!
... and many others from the T-Pot community by opening valued issues and discussions, suggesting ideas and thus helping to improve T-Pot!

View file

@ -2,7 +2,7 @@
# Visit https://bit.ly/cffinit to generate yours today!
cff-version: 1.2.0
title: T-Pot 24.04.1
title: T-Pot 24.04.0
message: >-
If you use this software, please cite it using the
metadata from this file.
@ -20,8 +20,8 @@ authors:
identifiers:
- type: url
value: >-
https://github.com/telekom-security/tpotce/releases/tag/24.04.1
description: T-Pot Release 24.04.1
https://github.com/telekom-security/tpotce/releases/tag/24.04.0
description: T-Pot Release 24.04.0
repository-code: 'https://github.com/telekom-security/tpotce'
abstract: >-
T-Pot is the all in one, optionally distributed, multiarch
@ -39,5 +39,5 @@ keywords:
- elk
license: GPL-3.0
commit: release
version: 24.04.1
date-released: '2024-12-11'
version: 24.04.0
date-released: '2024-04-22'

511
README.md
View file

@ -16,83 +16,73 @@ env bash -c "$(curl -sL https://github.com/telekom-security/tpotce/raw/master/in
```
* Follow instructions, read messages, check for possible port conflicts and reboot
# Table of Contents
<!-- TOC -->
- [T-Pot - The All In One Multi Honeypot Platform](#t-pot---the-all-in-one-multi-honeypot-platform)
- [TL;DR](#tldr)
- [Disclaimer](#disclaimer)
- [Technical Concept](#technical-concept)
- [Honeypots and Tools](#honeypots-and-tools)
- [Technical Architecture](#technical-architecture)
- [Services](#services)
- [User Types](#user-types)
- [System Requirements](#system-requirements)
- [Running in a VM](#running-in-a-vm)
- [Running on Hardware](#running-on-hardware)
- [Running in a Cloud](#running-in-a-cloud)
- [Required Ports](#required-ports)
- [LLM-Based Honeypots](#llm-based-honeypots)
- [Ollama](#ollama)
- [ChatGPT](#chatgpt)
- [System Placement](#system-placement)
- [Installation](#installation)
- [Choose your distro](#choose-your-distro)
- [Raspberry Pi 4 (8GB) Support](#raspberry-pi-4-8gb-support)
- [Get and install T-Pot](#get-and-install-t-pot)
- [macOS \& Windows](#macos--windows)
- [Installation Types](#installation-types)
- [Standard / Hive](#standard--hive)
- [Distributed](#distributed)
- [Uninstall T-Pot](#uninstall-t-pot)
- [First Start](#first-start)
- [Standalone First Start](#standalone-first-start)
- [Distributed Deployment](#distributed-deployment)
- [Planning and Certificates](#planning-and-certificates)
- [Deploying Sensors](#deploying-sensors)
- [Removing Sensors](#removing-sensors)
- [Community Data Submission](#community-data-submission)
- [Opt-In HPFEEDS Data Submission](#opt-in-hpfeeds-data-submission)
- [Remote Access and Tools](#remote-access-and-tools)
- [SSH](#ssh)
- [T-Pot Landing Page](#t-pot-landing-page)
- [Kibana Dashboard](#kibana-dashboard)
- [Attack Map](#attack-map)
- [Cyberchef](#cyberchef)
- [Elasticvue](#elasticvue)
- [Spiderfoot](#spiderfoot)
- [Configuration](#configuration)
- [T-Pot Config File](#t-pot-config-file)
- [Customize T-Pot Honeypots and Services](#customize-t-pot-honeypots-and-services)
- [Maintenance](#maintenance)
- [General Updates](#general-updates)
- [Update Script](#update-script)
- [Daily Reboot](#daily-reboot)
- [Known Issues](#known-issues)
- [Docker Images Fail to Download](#docker-images-fail-to-download)
- [T-Pot Networking Fails](#t-pot-networking-fails)
- [Start T-Pot](#start-t-pot)
- [Stop T-Pot](#stop-t-pot)
- [T-Pot Data Folder](#t-pot-data-folder)
- [Log Persistence](#log-persistence)
- [Factory Reset](#factory-reset)
- [Show Containers](#show-containers)
- [Blackhole](#blackhole)
- [Add Users to Nginx (T-Pot WebUI)](#add-users-to-nginx-t-pot-webui)
- [Import and Export Kibana Objects](#import-and-export-kibana-objects)
- [Export](#export)
- [Import](#import)
- [Troubleshooting](#troubleshooting)
- [Logs](#logs)
- [RAM and Storage](#ram-and-storage)
- [Contact](#contact)
- [Issues](#issues)
- [Discussions](#discussions)
- [Licenses](#licenses)
- [Credits](#credits)
- [The developers and development communities of](#the-developers-and-development-communities-of)
- [**The following companies and organizations**](#the-following-companies-and-organizations)
- [**And of course ***YOU*** for joining the community!**](#and-of-course-you-for-joining-the-community)
- [Testimonials](#testimonials)
- [Thank you 💖](#thank-you-)
* [T-Pot - The All In One Multi Honeypot Platform](#t-pot---the-all-in-one-multi-honeypot-platform)
* [TL;DR](#tldr)
* [Table of Contents](#table-of-contents)
* [Disclaimer](#disclaimer)
* [Technical Concept](#technical-concept)
* [Technical Architecture](#technical-architecture)
* [Services](#services)
* [User Types](#user-types)
* [System Requirements](#system-requirements)
* [Running in a VM](#running-in-a-vm)
* [Running on Hardware](#running-on-hardware)
* [Running in a Cloud](#running-in-a-cloud)
* [Required Ports](#required-ports)
* [System Placement](#system-placement)
* [Installation](#installation)
* [Choose your distro](#choose-your-distro)
* [Raspberry Pi 4 (8GB) Support](#raspberry-pi-4-8gb-support)
* [Get and install T-Pot](#get-and-install-t-pot)
* [macOS & Windows](#macos--windows)
* [Installation Types](#installation-types)
* [Standard / HIVE](#standard--hive)
* [**Distributed**](#distributed)
* [Uninstall T-Pot](#uninstall-t-pot)
* [First Start](#first-start)
* [Standalone First Start](#standalone-first-start)
* [Distributed Deployment](#distributed-deployment)
* [Community Data Submission](#community-data-submission)
* [Opt-In HPFEEDS Data Submission](#opt-in-hpfeeds-data-submission)
* [Remote Access and Tools](#remote-access-and-tools)
* [SSH](#ssh)
* [T-Pot Landing Page](#t-pot-landing-page-)
* [Kibana Dashboard](#kibana-dashboard)
* [Attack Map](#attack-map)
* [Cyberchef](#cyberchef)
* [Elasticvue](#elasticvue)
* [Spiderfoot](#spiderfoot)
* [Configuration](#configuration)
* [T-Pot Config File](#t-pot-config-file)
* [Customize T-Pot Honeypots and Services](#customize-t-pot-honeypots-and-services)
* [Maintenance](#maintenance)
* [General Updates](#general-updates)
* [Update Script](#update-script)
* [Known Issues](#known-issues)
* [**Docker Images Fail to Download**](#docker-images-fail-to-download)
* [Start T-Pot](#start-t-pot)
* [Stop T-Pot](#stop-t-pot)
* [T-Pot Data Folder](#t-pot-data-folder)
* [Log Persistence](#log-persistence)
* [Factory Reset](#factory-reset)
* [Show Containers](#show-containers)
* [Blackhole](#blackhole)
* [Add Users to Nginx (T-Pot WebUI)](#add-users-to-nginx-t-pot-webui)
* [Import and Export Kibana Objects](#import-and-export-kibana-objects)
* [**Export**](#export)
* [**Import**](#import)
* [Troubleshooting](#troubleshooting)
* [Logs](#logs)
* [RAM and Storage](#ram-and-storage)
* [Contact](#contact)
* [Issues](#issues)
* [Discussions](#discussions)
* [Licenses](#licenses)
* [Credits](#credits)
* [The developers and development communities of](#the-developers-and-development-communities-of)
* [Testimonials](#testimonials)
<!-- TOC -->
<br><br>
@ -108,40 +98,33 @@ env bash -c "$(curl -sL https://github.com/telekom-security/tpotce/raw/master/in
T-Pot's main components have been moved into the `tpotinit` Docker image allowing T-Pot to now support multiple Linux distributions, even macOS and Windows (although both limited to the feature set of Docker Desktop). T-Pot uses [docker](https://www.docker.com/) and [docker compose](https://docs.docker.com/compose/) to reach its goal of running as many honeypots and tools as possible simultaneously and thus utilizing the host's hardware to its maximum.
<br><br>
## Honeypots and Tools
- T-Pot offers docker images for the following honeypots:<br>
[adbhoney](https://github.com/huuck/ADBHoney),
[beelzebub](https://github.com/mariocandela/beelzebub),
[ciscoasa](https://github.com/Cymmetria/ciscoasa_honeypot),
[citrixhoneypot](https://github.com/MalwareTech/CitrixHoneypot),
[conpot](http://conpot.org/),
[cowrie](https://github.com/cowrie/cowrie),
[ddospot](https://github.com/aelth/ddospot),
[dicompot](https://github.com/nsmfoo/dicompot),
[dionaea](https://github.com/DinoTools/dionaea),
[elasticpot](https://gitlab.com/bontchev/elasticpot),
[endlessh](https://github.com/skeeto/endlessh),
[galah](https://github.com/0x4D31/galah),
[go-pot](https://github.com/ryanolee/go-pot),
[glutton](https://github.com/mushorg/glutton),
[h0neytr4p](https://github.com/pbssubhash/h0neytr4p),
[hellpot](https://github.com/yunginnanet/HellPot),
[heralding](https://github.com/johnnykv/heralding),
[honeyaml](https://github.com/mmta/honeyaml),
[honeypots](https://github.com/qeeqbox/honeypots),
[honeytrap](https://github.com/armedpot/honeytrap/),
[ipphoney](https://gitlab.com/bontchev/ipphoney),
[log4pot](https://github.com/thomaspatzke/Log4Pot),
[mailoney](https://github.com/awhitehatter/mailoney),
[medpot](https://github.com/schmalle/medpot),
[miniprint](https://github.com/sa7mon/miniprint),
[redishoneypot](https://github.com/cypwnpwnsocute/RedisHoneyPot),
[sentrypeer](https://github.com/SentryPeer/SentryPeer),
[snare](http://mushmush.org/),
[tanner](http://mushmush.org/),
[wordpot](https://github.com/gbrindisi/wordpot)
T-Pot offers docker images for the following honeypots ...
* [adbhoney](https://github.com/huuck/ADBHoney),
* [ciscoasa](https://github.com/Cymmetria/ciscoasa_honeypot),
* [citrixhoneypot](https://github.com/MalwareTech/CitrixHoneypot),
* [conpot](http://conpot.org/),
* [cowrie](https://github.com/cowrie/cowrie),
* [ddospot](https://github.com/aelth/ddospot),
* [dicompot](https://github.com/nsmfoo/dicompot),
* [dionaea](https://github.com/DinoTools/dionaea),
* [elasticpot](https://gitlab.com/bontchev/elasticpot),
* [endlessh](https://github.com/skeeto/endlessh),
* [glutton](https://github.com/mushorg/glutton),
* [hellpot](https://github.com/yunginnanet/HellPot),
* [heralding](https://github.com/johnnykv/heralding),
* [honeypots](https://github.com/qeeqbox/honeypots),
* [honeytrap](https://github.com/armedpot/honeytrap/),
* [ipphoney](https://gitlab.com/bontchev/ipphoney),
* [log4pot](https://github.com/thomaspatzke/Log4Pot),
* [mailoney](https://github.com/awhitehatter/mailoney),
* [medpot](https://github.com/schmalle/medpot),
* [redishoneypot](https://github.com/cypwnpwnsocute/RedisHoneyPot),
* [sentrypeer](https://github.com/SentryPeer/SentryPeer),
* [snare](http://mushmush.org/),
* [tanner](http://mushmush.org/),
* [wordpot](https://github.com/gbrindisi/wordpot)
Alongside the following tools:
... alongside the following tools ...
* [Autoheal](https://github.com/willfarrell/docker-autoheal) a tool to automatically restart containers with failed healthchecks.
* [Cyberchef](https://gchq.github.io/CyberChef/) a web app for encryption, encoding, compression and data analysis.
* [Elastic Stack](https://www.elastic.co/videos) to beautifully visualize all the events captured by T-Pot.
@ -150,7 +133,7 @@ Alongside the following tools:
* [T-Pot-Attack-Map](https://github.com/t3chn0m4g3/t-pot-attack-map) a beautifully animated attack map for T-Pot.
* [P0f](https://lcamtuf.coredump.cx/p0f3/) is a tool for purely passive traffic fingerprinting.
* [Spiderfoot](https://github.com/smicallef/spiderfoot) an open source intelligence automation tool.
* [Suricata](https://suricata.io/) a Network Security Monitoring engine.
* [Suricata](http://suricata-ids.org/) a Network Security Monitoring engine.
... to give you the best out-of-the-box experience possible and an easy-to-use multi-honeypot system.
<br><br>
@ -190,7 +173,7 @@ T-Pot offers a number of services which are basically divided into five groups:
During the installation and during the usage of T-Pot there are two different types of accounts you will be working with. Make sure you know the differences of the different account types, since it is **by far** the most common reason for authentication errors.
| Service | Account Type | Username / Group | Description |
| :--------------- | :----------- | :--------------- | :----------------------------------------------------------------- |
|:-----------------|:-------------|:-----------------|:-------------------------------------------------------------------|
| SSH | OS | `<OS_USERNAME>` | The user you chose during the installation of the OS. |
| Nginx | BasicAuth | `<WEB_USER>` | `<web_user>` you chose during the installation of T-Pot. |
| CyberChef | BasicAuth | `<WEB_USER>` | `<web_user>` you chose during the installation of T-Pot. |
@ -208,10 +191,10 @@ During the installation and during the usage of T-Pot there are two different ty
Depending on the [supported Linux distro images](#choose-your-distro), hive / sensor, installing on [real hardware](#running-on-hardware), in a [virtual machine](#running-in-a-vm) or other environments there are different kind of requirements to be met regarding OS, RAM, storage and network for a successful installation of T-Pot (you can always adjust `~/tpotce/docker-compose.yml` and `~/tpotce/.env`to your needs to overcome these requirements).
<br><br>
| T-Pot Type | RAM | Storage | Description |
| :--------- | :--- | :-------- | :----------------------------------------------------------------------------------------------- |
| Hive | 16GB | 256GB SSD | As a rule of thumb, the more honeypots, sensors & data, the more RAM and storage is needed. |
| Sensor | 8GB | 128GB SSD | Since honeypot logs are persisted (~/tpotce/data) for 30 days, storage depends on attack volume. |
| T-Pot Type | RAM | Storage | Description |
|:-----------|:-----|:-----------|:-------------------------------------------------------------------------------------------------|
| Hive | 16GB | 256GB SSD | As a rule of thumb, the more sensors & data, the more RAM and storage is needed. |
| Sensor | 8GB | 128GB SSD | Since honeypot logs are persisted (~/tpotce/data) for 30 days, storage depends on attack volume. |
T-Pot does require ...
- an IPv4 address via DHCP or statically assigned
@ -250,14 +233,12 @@ Some users report working installations on other clouds and hosters, i.e. Azure
Besides the ports generally needed by the OS, i.e. obtaining a DHCP lease, DNS, etc. T-Pot will require the following ports for incoming / outgoing connections. Review the [T-Pot Architecture](#technical-architecture) for a visual representation. Also some ports will show up as duplicates, which is fine since they are used in different editions.
| Port | Protocol | Direction | Description |
| :------------------------------------------------------------------------------------------------------------------------------------ | :------- | :-------- | :-------------------------------------------------------------------------------------------------- |
|:--------------------------------------------------------------------------------------------------------------------------------------|:---------|:----------|:----------------------------------------------------------------------------------------------------|
| 80, 443 | tcp | outgoing | T-Pot Management: Install, Updates, Logs (i.e. OS, GitHub, DockerHub, Sicherheitstacho, etc.) |
| 11434 | tcp | outgoing | LLM based honeypots: Access your Ollama installation |
| 64294 | tcp | incoming | T-Pot Management: Sensor data transmission to hive (through NGINX reverse proxy) to 127.0.0.1:64305 |
| 64295 | tcp | incoming | T-Pot Management: Access to SSH |
| 64297 | tcp | incoming | T-Pot Management: Access to NGINX reverse proxy |
| 5555 | tcp | incoming | Honeypot: ADBHoney |
| 22 | tcp | incoming | Honeypot: Beelzebub (LLM required) |
| 5000 | udp | incoming | Honeypot: CiscoASA |
| 8443 | tcp | incoming | Honeypot: CiscoASA |
| 443 | tcp | incoming | Honeypot: CitrixHoneypot |
@ -270,18 +251,13 @@ Besides the ports generally needed by the OS, i.e. obtaining a DHCP lease, DNS,
| 69 | udp | incoming | Honeypot: Dionaea |
| 9200 | tcp | incoming | Honeypot: Elasticpot |
| 22 | tcp | incoming | Honeypot: Endlessh |
| 80, 443, 8080, 8443 | tcp | incoming | Honeypot: Galah (LLM required) |
| 8080 | tcp | incoming | Honeypot: Go-pot |
| 80, 443 | tcp | incoming | Honeypot: H0neytr4p |
| 21, 22, 23, 25, 80, 110, 143, 443, 993, 995, 1080, 5432, 5900 | tcp | incoming | Honeypot: Heralding |
| 3000 | tcp | incoming | Honeypot: Honeyaml |
| 21, 22, 23, 25, 80, 110, 143, 389, 443, 445, 631, 1080, 1433, 1521, 3306, 3389, 5060, 5432, 5900, 6379, 6667, 8080, 9100, 9200, 11211 | tcp | incoming | Honeypot: qHoneypots |
| 53, 123, 161, 5060 | udp | incoming | Honeypot: qHoneypots |
| 631 | tcp | incoming | Honeypot: IPPHoney |
| 80, 443, 8080, 9200, 25565 | tcp | incoming | Honeypot: Log4Pot |
| 25 | tcp | incoming | Honeypot: Mailoney |
| 2575 | tcp | incoming | Honeypot: Medpot |
| 9100 | tcp | incoming | Honeypot: Miniprint |
| 6379 | tcp | incoming | Honeypot: Redishoneypot |
| 5060 | tcp/udp | incoming | Honeypot: SentryPeer |
| 80 | tcp | incoming | Honeypot: Snare (Tanner) |
@ -292,16 +268,6 @@ Ports and availability of SaaS services may vary based on your geographical loca
For some honeypots to reach full functionality (i.e. Cowrie or Log4Pot) outgoing connections are necessary as well, in order for them to download the attacker's malware. Please see the individual honeypot's documentation to learn more by following the [links](#technical-concept) to their repositories.
## LLM-Based Honeypots
We think LLM-Based Honeypots mark the **beginning** of a game change for the deception / honeypot field. Consequently, starting with the release of **T-Pot 24.04.1**, two LLM-based honeypots, **Beelzebub** and **Galah**, have been introduced. These honeypots require an installation of **Ollama**, which needs to be configured in the [T-Pot configuration file](#t-pot-config-file). You can also adjust the settings in this file for **ChatGPT** support, but note that changes will also be required in the docker compose file (`~/tpotce/compose/llm.yml`) to accommodate these adjustments.<br><br>
Follow the links in the [Honeypots and Tools](#honeypots-and-tools) section to find out more about **Beelzebub** and **Galah**.
### Ollama
🚨 **CPU-based usage is not recommended**, not even for testing.<br><br>
To set up and run **Ollama**, refer to the [Ollama GitHub repository](https://github.com/ollama/ollama) for instructions. For entry-level or testing purposes, results can be achieved using a **Nvidia RTX 4060 Ti 16GB** or equivalent (AMD's ROCm is also supported by Ollama), with models like **openchat** and **Llama3**. As a general rule with LLM-based systems, the better and more hardware you use, the faster and more accurate the results will be, especially when tasks are offloaded to multiple GPUs and larger models.
### ChatGPT
ChatGPT support for these honeypots will remain untested in relation to T-Pot.
<br><br>
# System Placement
@ -315,28 +281,22 @@ Once you are familiar with how things work you should choose a network you suspe
<br><br>
## Choose your distro
**Steps to Follow:**
Choose a supported distro of your choice. It is recommended to use the minimum / netinstall installers linked below and only install a minimalistic set of packages. SSH is mandatory or you will not be able to connect to the machine remotely.
1. Download a supported Linux distribution from the list below.
2. During installation choose a **minimum**, **netinstall** or **server** version that will only install essential packages.
3. **Never** install a graphical desktop environment such as Gnome or KDE. T-Pot will fail to work with it due to port conflicts.
4. Make sure to install SSH, so you can connect to the machine remotely.
| Distribution Name | x64 | arm64 |
| :--------------------------------------------------------------------------------- | :------------------------------------------------------------------------------------------------------------------------------------ | :-------------------------------------------------------------------------------------------------------------------------------------- |
| [Alma Linux OS 9.5 Boot ISO](https://almalinux.org) | [download](https://repo.almalinux.org/almalinux/9.5/isos/x86_64/AlmaLinux-9.5-x86_64-boot.iso) | [download](https://repo.almalinux.org/almalinux/9.5/isos/aarch64/AlmaLinux-9.5-aarch64-boot.iso) |
| [Debian 12 Network Install](https://www.debian.org/CD/netinst/index.en.html) | [download](https://cdimage.debian.org/debian-cd/current/amd64/iso-cd/debian-12.10.0-amd64-netinst.iso) | [download](https://cdimage.debian.org/debian-cd/current/arm64/iso-cd/debian-12.10.0-arm64-netinst.iso) |
| [Fedora Server 42 Network Install](https://fedoraproject.org/server/download) | [download](https://download.fedoraproject.org/pub/fedora/linux/releases/42/Server/x86_64/iso/Fedora-Server-netinst-x86_64-42-1.1.iso) | [download](https://download.fedoraproject.org/pub/fedora/linux/releases/42/Server/aarch64/iso/Fedora-Server-netinst-aarch64-42-1.1.iso) |
| [OpenSuse Tumbleweed Network Image](https://get.opensuse.org/tumbleweed/#download) | [download](https://download.opensuse.org/tumbleweed/iso/openSUSE-Tumbleweed-NET-x86_64-Current.iso) | [download](https://download.opensuse.org/ports/aarch64/tumbleweed/iso/openSUSE-Tumbleweed-NET-aarch64-Current.iso) |
| [Rocky Linux OS 9.5 Boot ISO](https://rockylinux.org/download) | [download](https://download.rockylinux.org/pub/rocky/9/isos/x86_64/Rocky-9.5-x86_64-minimal.iso) | [download](https://download.rockylinux.org/pub/rocky/9/isos/aarch64/Rocky-9.5-aarch64-minimal.iso) |
| [Ubuntu 24.04.1 Live Server](https://ubuntu.com/download/server) | [download](https://releases.ubuntu.com/24.04/ubuntu-24.04.1-live-server-amd64.iso) | [download](https://cdimage.ubuntu.com/releases/24.04/release/ubuntu-24.04.1-live-server-arm64.iso) |
| Distribution Name | x64 | arm64 |
|:--------------------------------------------------------------|:--------------------------------------------------------------------------------------------------------------------------------------|:----------------------------------------------------------------------------------------------------------------------------------------|
| [Alma Linux Boot](https://almalinux.org) | [download](https://repo.almalinux.org/almalinux/9.3/isos/x86_64/AlmaLinux-9.3-x86_64-boot.iso) | [download](https://repo.almalinux.org/almalinux/9.3/isos/aarch64/AlmaLinux-9.3-aarch64-boot.iso) |
| [Debian Netinst](https://www.debian.org/index.en.html) | [download](https://cdimage.debian.org/debian-cd/current/amd64/iso-cd/debian-12.5.0-amd64-netinst.iso) | [download](https://cdimage.debian.org/debian-cd/current/arm64/iso-cd/debian-12.5.0-arm64-netinst.iso) |
| [Fedora Netinst](https://fedoraproject.org) | [download](https://download.fedoraproject.org/pub/fedora/linux/releases/39/Server/x86_64/iso/Fedora-Server-netinst-x86_64-39-1.5.iso) | [download](https://download.fedoraproject.org/pub/fedora/linux/releases/39/Server/aarch64/iso/Fedora-Server-netinst-aarch64-39-1.5.iso) |
| [OpenSuse Tumbleweed Network Image](https://www.opensuse.org) | [download](https://download.opensuse.org/tumbleweed/iso/openSUSE-Tumbleweed-NET-x86_64-Current.iso) | [download](https://download.opensuse.org/ports/aarch64/tumbleweed/iso/openSUSE-Tumbleweed-NET-aarch64-Current.iso) |
| [Rocky Linux Boot](https://rockylinux.org) | [download](https://download.rockylinux.org/pub/rocky/9/isos/x86_64/Rocky-9.3-x86_64-boot.iso) | [download](https://download.rockylinux.org/pub/rocky/9/isos/aarch64/Rocky-9.3-aarch64-boot.iso) |
| [Ubuntu Live Server](https://ubuntu.com) | [download](https://releases.ubuntu.com/22.04.4/ubuntu-22.04.4-live-server-amd64.iso) | [download](https://cdimage.ubuntu.com/releases/22.04/release/ubuntu-22.04.4-live-server-arm64.iso) |
<br>
## Raspberry Pi 4 (8GB) Support
| Distribution Name | arm64 |
| :--------------------------------------------------------------- | :-------------------------------------------------------------------------------------------------------------------------------------------------- |
|:-----------------------------------------------------------------|:----------------------------------------------------------------------------------------------------------------------------------------------------|
| [Raspberry Pi OS (**64Bit, Lite**)](https://www.raspberrypi.com) | [download](https://downloads.raspberrypi.com/raspios_lite_arm64/images/raspios_lite_arm64-2024-03-15/2024-03-15-raspios-bookworm-arm64-lite.img.xz) |
<br><br>
@ -367,10 +327,10 @@ Once you are familiar with how things work you should choose a network you suspe
Sometimes it is just nice if you can spin up a T-Pot instance on macOS or Windows, i.e. for development, testing or just the fun of it. As Docker Desktop is rather limited, not all honeypot types or T-Pot features are supported. Also remember, by default the macOS and Windows firewall are blocking access from remote, so testing is limited to the host. For production it is recommended to run T-Pot on [Linux](#choose-your-distro).<br>
To get things up and running just follow these steps:
1. Install Docker Desktop for [macOS](https://docs.docker.com/desktop/install/mac-install/) or [Windows](https://docs.docker.com/desktop/install/windows-install/).
2. Clone the GitHub repository: `git clone https://github.com/telekom-security/tpotce` (in Windows make sure the code is checked out with `LF` instead of `CRLF`!)
2. Clone the GitHub repository: `git clone https://github.com/telekom-security/tpotce`
3. Go to: `cd ~/tpotce`
4. Copy `cp compose/mac_win.yml ./docker-compose.yml`
5. Create a `WEB_USER` by running `~/tpotce/genuser.sh` (macOS) or `~/tpotce/genuserwin.ps1` (Windows)
5. Create a `WEB_USER` by running `~/tpotce/genuser.sh`
6. Adjust the `.env` file by changing `TPOT_OSTYPE=linux` to either `mac` or `win`:
```
# OSType (linux, mac, win)
@ -383,16 +343,16 @@ To get things up and running just follow these steps:
## Installation Types
### Standard / Hive
With T-Pot Standard / Hive all services, tools, honeypots, etc. will be installed on to a single host which also serves as a Hive endpoint. Make sure to meet the [system requirements](#system-requirements). You can adjust `~/tpotce/docker-compose.yml` to your personal use-case or create your very own configuration using `~/tpotce/compose/customizer.py` for a tailored T-Pot experience to your needs.
### Standard / HIVE
With T-Pot Standard / HIVE all services, tools, honeypots, etc. will be installed on to a single host which also serves as a HIVE endpoint. Make sure to meet the [system requirements](#system-requirements). You can adjust `~/tpotce/docker-compose.yml` to your personal use-case or create your very own configuration using `~/tpotce/compose/customizer.py` for a tailored T-Pot experience to your needs.
Once the installation is finished you can proceed to [First Start](#first-start).
<br><br>
### Distributed
### **Distributed**
The distributed version of T-Pot requires at least two hosts:
- the T-Pot **Hive**, the standard installation of T-Pot (install this first!),
- and a T-Pot **Sensor**, which will host only the honeypots, some tools and transmit log data to the **Hive**.
- The **Sensor** will not start before finalizing the **Sensor** installation as described in [Distributed Deployment](#distributed-deployment).
- the T-Pot **HIVE**, the standard installation of T-Pot (install this first!),
- and a T-Pot **SENSOR**, which will host only the honeypots, some tools and transmit log data to the **HIVE**.
- The **SENSOR** will not start before finalizing the **SENSOR** installation as described in [Distributed Deployment](#distributed-deployment).
<br><br>
## Uninstall T-Pot
@ -418,42 +378,13 @@ You can also login from your browser and access the T-Pot WebUI and tools: `http
<br><br>
## Standalone First Start
There is not much to do except to login and check via `dps` if all services and honeypots are starting up correctly and login to Kibana and / or Geoip Attack Map to monitor the attacks.
There is not much to do except to login and check via `dps.sh` if all services and honeypots are starting up correctly and login to Kibana and / or Geoip Attack Map to monitor the attacks.
<br><br>
## Distributed Deployment
### Planning and Certificates
The distributed deployment involves planning, as **T-Pot Init** will only create a self-signed certificate for the IP of the **Hive** host, which usually is suitable for simple setups. Since **logstash** will check for a valid certificate upon connection, a distributed setup involving **Hive** to be reachable on multiple IPs (i.e. RFC 1918 and public NAT IP) and maybe even a domain name will result in a connection error where the certificate cannot be validated, as such a setup needs a certificate with a common name and SANs (Subject Alternative Name).<br>
Before deploying any sensors make sure you have planned out domain names and IPs properly to avoid issues with the certificate. For more details see [issue #1543](https://github.com/telekom-security/tpotce/issues/1543).<br>
Adjust the example to your IP / domain setup and follow the commands to change the certificate of **Hive**:
Once you have rebooted the **SENSOR** as instructed by the installer you can continue with the distributed deployment by logging into **HIVE** and go to `cd ~/tpotce` folder.
```
sudo systemctl stop tpot
sudo openssl req \
-nodes \
-x509 \
-sha512 \
-newkey rsa:8192 \
-keyout "$HOME/tpotce/data/nginx/cert/nginx.key" \
-out "$HOME/tpotce/data/nginx/cert/nginx.crt" \
-days 3650 \
-subj '/C=AU/ST=Some-State/O=Internet Widgits Pty Ltd' \
-addext "subjectAltName = IP:192.168.1.200, IP:1.2.3.4, DNS:my.primary.domain, DNS:my.secondary.domain"
sudo chmod 774 $HOME/tpotce/data/nginx/cert/*
sudo chown tpot:tpot $HOME/tpotce/data/nginx/cert/*
sudo systemctl start tpot
```
The T-Pot configuration file (`.env`) does allow to disable the SSL verification for logstash connections from **Sensor** to the **Hive** by setting `LS_SSL_VERIFICATION=none`. For security reasons this is only recommended for lab or test environments.<br><br>
If you choose to use a valid certificate for the **Hive** signed by a CA (i.e. Let's Encrypt), logstash, and therefore the **Sensor**, should have no problems to connect and transmit its logs to the **Hive**.
### Deploying Sensors
Once you have rebooted the **Sensor** as instructed by the installer you can continue with the distributed deployment by logging into **Hive** and go to `cd ~/tpotce` folder. Make sure you understood the [Planning and Certificates](#planning-and-certificates) before continuing with the actual deployment.
If you have not already done so, generate an SSH key to securely login to the **Sensor** and to allow `Ansible` to run a playbook on the sensor:

If you have not already done so, generate an SSH key to securely login to the **SENSOR** and to allow `Ansible` to run a playbook on the sensor:
1. Run `ssh-keygen`, follow the instructions and leave the passphrase empty:
```
Generating public/private rsa key pair.
@ -463,10 +394,10 @@ If you have not done already generate a SSH key to securely login to the **Senso
Your identification has been saved in /home/<your_user>/.ssh/id_rsa
Your public key has been saved in /home/<your_user>/.ssh/id_rsa.pub
```
2. Deploy the key to the Sensor by running `ssh-copy-id -p 64295 <Sensor_SSH_USER>@<Sensor_IP>)`:
2. Deploy the key to the SENSOR by running `ssh-copy-id -p 64295 <SENSOR_SSH_USER>@<SENSOR_IP>)`:
```
/usr/bin/ssh-copy-id: INFO: Source of key(s) to be installed: "/home/<your_user>/.ssh/id_rsa.pub"
The authenticity of host '[<Sensor_IP>]:64295 ([<Sensor_IP>]:64295)' can't be established.

The authenticity of host '[<SENSOR_IP>]:64295 ([<SENSOR_IP>]:64295)' can't be established.
ED25519 key fingerprint is SHA256:naIDxFiw/skPJadTcgmWZQtgt+CdfRbUCoZn5RmkOnQ.
This key is not known by any other names.
Are you sure you want to continue connecting (yes/no/[fingerprint])? yes
@ -476,17 +407,13 @@ If you have not done already generate a SSH key to securely login to the **Senso
Number of key(s) added: 1
Now try logging into the machine, with: "ssh -p '64295' '<your_user>@<Sensor_IP>'"
Now try logging into the machine, with: "ssh -p '64295' '<your_user>@<SENSOR_IP>'"
and check to make sure that only the key(s) you wanted were added.
```
3. As suggested follow the instructions to test the connection `ssh -p '64295' '<your_user>@<Sensor_IP>'`.
3. As suggested follow the instructions to test the connection `ssh -p '64295' '<your_user>@<SENSOR_IP>'`.
4. Once the key is successfully deployed run `./deploy.sh` and follow the instructions.
<br><br>
### Removing Sensors
Identify the `TPOT_HIVE_USER` ENV on the Sensor in the `$HOME/tpotce/.env` config (it is a base64 encoded string). Now identify the same string in the `LS_WEB_USER` ENV on the Hive in the `$HOME/tpotce/.env` config. Remove the string and restart T-Pot.<br>
Now you can safely delete the Sensor machine.
## Community Data Submission
T-Pot is provided in order to make it accessible to everyone interested in honeypots. By default, the captured data is submitted to a community backend. This community backend uses the data to feed [Sicherheitstacho](https://sicherheitstacho.eu).
You may opt out of the submission by removing the `# Ewsposter service` from `~/tpotce/docker-compose.yml` by following these steps:
@ -570,7 +497,7 @@ On the T-Pot Landing Page just click on `Cyberchef` and you will be forwarded to
<br><br>
## Elasticvue
On the T-Pot Landing Page just click on `Elasticvue` and you will be forwarded to Elasticvue.
On the T-Pot Landing Page just click on `Elasticvue` and you will be forwarded to Elasticvue.
![Elasticvue](doc/elasticvue.png)
<br><br>
@ -589,16 +516,15 @@ Before the first start run `~/tpotce/genuser.sh` or setup the `WEB_USER` manuall
## Customize T-Pot Honeypots and Services
In `~/tpotce/compose` you will find everything you need to adjust the T-Pot Standard / Hive installation:
In `~/tpotce/compose` you will find everything you need to adjust the T-Pot Standard / HIVE installation:
```
customizer.py
llm.yml
mac_win.yml
mini.yml
mobile.yml
raspberry_showcase.yml
sensor.yml
standard.yml
tarpit.yml
tpot_services.yml
```
The `.yml` files are docker compose files, each representing a different set of honeypots and tools with `tpot_services.yml` being a template for `customizer.py` to create a customized docker compose file.<br><br>
@ -639,25 +565,18 @@ The update script will ...
- update all files in `~/tpotce` to be in sync with the T-Pot master branch
- restore your custom `ews.cfg` from `~/tpotce/data/ews/conf` and the T-Pot configuration (`~/tpotce/.env`).
## Daily Reboot
By default T-Pot will add a daily reboot including some cleaning up. You can adjust this line with `sudo crontab -e`
```
#Ansible: T-Pot Daily Reboot
42 2 * * * bash -c 'systemctl stop tpot.service && docker container prune -f; docker image prune -f; docker volume prune -f; /usr/sbin/shutdown -r +1 "T-Pot Daily Reboot"'
```
## Known Issues
The following issues are known, simply follow the described steps to solve them.
<br><br>
### Docker Images Fail to Download
### **Docker Images Fail to Download**
Some time ago Docker introduced download [rate limits](https://docs.docker.com/docker-hub/download-rate-limit/#:~:text=Docker%20Hub%20limits%20the%20number,pulls%20per%206%20hour%20period.). If you are frequently downloading Docker images via a single or shared IP, the IP address might have exhausted the Docker download rate limit. Login to your Docker account to extend the rate limit.
```
sudo su -
docker login
```
### T-Pot Networking Fails
### **T-Pot Networking Fails**
T-Pot is designed to only run on machines with a single NIC. T-Pot will try to grab the interface with the default route, however it is not guaranteed that this will always succeed. At best use T-Pot on machines with only a single NIC.
## Start T-Pot
@ -702,7 +621,7 @@ git reset --hard
<br><br>
## Show Containers
You can show all T-Pot relevant containers by running `dps` or `dpsw [interval]`. The `interval (s)` will re-run `dps` periodically.
You can show all T-Pot relevant containers by running `dps` or `dpsw [interval]`. The `interval (s)` will re-run `dps.sh` periodically.
<br><br>
## Blackhole
@ -715,15 +634,14 @@ Enabling this feature will drastically reduce attackers visibility and consequen
## Add Users to Nginx (T-Pot WebUI)
Nginx (T-Pot WebUI) allows you to add as many `<WEB_USER>` accounts as you want (according to the [User Types](#user-types)).<br>
To **add** a new user run `~/tpotce/genuser.sh`.<br>
To **remove** users open `~/tpotce/.env`, locate `WEB_USER` and remove the corresponding base64 string (to decode: `echo <base64_string> | base64 -d`, or open CyberChef and load "From Base64" recipe).<br>
For the changes to take effect you need to restart T-Pot using `systemctl stop tpot` and `systemctl start tpot` or `sudo reboot`.
To **add** a new user run `~/tpotce/genuser.sh` which will also update the accounts without the need to restart T-Pot.<br>
To **remove** users open `~/tpotce/.env`, locate `WEB_USER` and remove the corresponding base64 string (to decode: `echo <base64_string> | base64 -d`, or open CyberChef and load "From Base64" recipe). For the changes to take effect you need to restart T-Pot using `systemctl stop tpot` and `systemctl start tpot` or `sudo reboot`.
<br><br>
## Import and Export Kibana Objects
Some T-Pot updates will require you to update the Kibana objects, either to support new honeypots or to improve existing dashboards or visualizations. Make sure to ***export*** first so you do not lose any of your adjustments.
### Export
### **Export**
1. Go to Kibana
2. Click on "Stack Management"
3. Click on "Saved Objects"
@ -731,7 +649,7 @@ Some T-Pot updates will require you to update the Kibana objects. Either to supp
5. Click on "Export all"
This will export a NDJSON file with all your objects. Always run a full export to make sure all references are included.
### Import
### **Import**
1. [Download the NDJSON file](https://github.com/dtag-dev-sec/tpotce/blob/master/docker/tpotinit/dist/etc/objects/kibana_export.ndjson.zip) and unzip it.
2. Go to Kibana
3. Click on "Stack Management"
@ -788,123 +706,74 @@ Use the search function, it is possible a similar discussion has been opened alr
# Licenses
The software that T-Pot is built on uses the following licenses.
<br>GPLv2:
[conpot](https://github.com/mushorg/conpot/blob/master/LICENSE.txt),
[galah](https://github.com/0x4D31/galah?tab=Apache-2.0-1-ov-file#readme),
[dionaea](https://github.com/DinoTools/dionaea/blob/master/LICENSE),
[honeytrap](https://github.com/armedpot/honeytrap/blob/master/LICENSE),
[suricata](https://suricata.io/features/open-source/)
<br>GPLv3:
[adbhoney](https://github.com/huuck/ADBHoney),
[elasticpot](https://gitlab.com/bontchev/elasticpot/-/blob/master/LICENSE),
[ewsposter](https://github.com/telekom-security/ews/),
[log4pot](https://github.com/thomaspatzke/Log4Pot/blob/master/LICENSE),
[fatt](https://github.com/0x4D31/fatt/blob/master/LICENSE),
[heralding](https://github.com/johnnykv/heralding/blob/master/LICENSE.txt),
[ipphoney](https://gitlab.com/bontchev/ipphoney/-/blob/master/LICENSE),
[miniprint](https://github.com/sa7mon/miniprint?tab=GPL-3.0-1-ov-file#readme),
[redishoneypot](https://github.com/cypwnpwnsocute/RedisHoneyPot/blob/main/LICENSE),
[sentrypeer](https://github.com/SentryPeer/SentryPeer/blob/main/LICENSE.GPL-3.0-only),
[snare](https://github.com/mushorg/snare/blob/master/LICENSE),
[tanner](https://github.com/mushorg/snare/blob/master/LICENSE)
<br>Apache 2 License:
[cyberchef](https://github.com/gchq/CyberChef/blob/master/LICENSE),
[dicompot](https://github.com/nsmfoo/dicompot/blob/master/LICENSE),
[elasticsearch](https://github.com/elasticsearch/elasticsearch/blob/master/LICENSE.txt),
[go-pot](https://github.com/ryanolee/go-pot?tab=License-1-ov-file#readme),
[h0neytr4p](https://github.com/pbssubhash/h0neytr4p?tab=Apache-2.0-1-ov-file#readme),
[logstash](https://github.com/elasticsearch/logstash/blob/master/LICENSE),
[kibana](https://github.com/elasticsearch/kibana/blob/master/LICENSE.md),
[docker](https://github.com/docker/docker/blob/master/LICENSE)
<br>MIT license:
[autoheal](https://github.com/willfarrell/docker-autoheal?tab=MIT-1-ov-file#readme),
[beelzebub](https://github.com/mariocandela/beelzebub?tab=MIT-1-ov-file#readme),
[ciscoasa](https://github.com/Cymmetria/ciscoasa_honeypot/blob/master/LICENSE),
[ddospot](https://github.com/aelth/ddospot/blob/master/LICENSE),
[elasticvue](https://github.com/cars10/elasticvue/blob/master/LICENSE),
[glutton](https://github.com/mushorg/glutton/blob/master/LICENSE),
[hellpot](https://github.com/yunginnanet/HellPot/blob/master/LICENSE),
[honeyaml](https://github.com/mmta/honeyaml?tab=MIT-1-ov-file#readme),
[maltrail](https://github.com/stamparm/maltrail/blob/master/LICENSE)
<br>Unlicense:
[endlessh](https://github.com/skeeto/endlessh/blob/master/UNLICENSE)
<br>Other:
[citrixhoneypot](https://github.com/MalwareTech/CitrixHoneypot#licencing-agreement-malwaretech-public-licence),
[cowrie](https://github.com/cowrie/cowrie/blob/master/LICENSE.rst),
[mailoney](https://github.com/awhitehatter/mailoney),
[Elastic License](https://www.elastic.co/licensing/elastic-license),
[Wordpot](https://github.com/gbrindisi/wordpot)
<br>AGPL-3.0:
[honeypots](https://github.com/qeeqbox/honeypots/blob/main/LICENSE)
<br>[Public Domain (CC)](https://creativecommons.org/publicdomain/zero/1.0/):
[Harvard Dataverse](https://dataverse.harvard.edu/dataverse/harvard/?q=dicom)
<br>GPLv2: [conpot](https://github.com/mushorg/conpot/blob/master/LICENSE.txt), [dionaea](https://github.com/DinoTools/dionaea/blob/master/LICENSE), [honeytrap](https://github.com/armedpot/honeytrap/blob/master/LICENSE), [suricata](http://suricata-ids.org/about/open-source/)
<br>GPLv3: [adbhoney](https://github.com/huuck/ADBHoney), [elasticpot](https://gitlab.com/bontchev/elasticpot/-/blob/master/LICENSE), [ewsposter](https://github.com/telekom-security/ews/), [log4pot](https://github.com/thomaspatzke/Log4Pot/blob/master/LICENSE), [fatt](https://github.com/0x4D31/fatt/blob/master/LICENSE), [heralding](https://github.com/johnnykv/heralding/blob/master/LICENSE.txt), [ipphoney](https://gitlab.com/bontchev/ipphoney/-/blob/master/LICENSE), [redishoneypot](https://github.com/cypwnpwnsocute/RedisHoneyPot/blob/main/LICENSE), [sentrypeer](https://github.com/SentryPeer/SentryPeer/blob/main/LICENSE.GPL-3.0-only), [snare](https://github.com/mushorg/snare/blob/master/LICENSE), [tanner](https://github.com/mushorg/snare/blob/master/LICENSE)
<br>Apache 2 License: [cyberchef](https://github.com/gchq/CyberChef/blob/master/LICENSE), [dicompot](https://github.com/nsmfoo/dicompot/blob/master/LICENSE), [elasticsearch](https://github.com/elasticsearch/elasticsearch/blob/master/LICENSE.txt), [logstash](https://github.com/elasticsearch/logstash/blob/master/LICENSE), [kibana](https://github.com/elasticsearch/kibana/blob/master/LICENSE.md), [docker](https://github.com/docker/docker/blob/master/LICENSE)
<br>MIT license: [autoheal](https://github.com/willfarrell/docker-autoheal?tab=MIT-1-ov-file#readme), [ciscoasa](https://github.com/Cymmetria/ciscoasa_honeypot/blob/master/LICENSE), [ddospot](https://github.com/aelth/ddospot/blob/master/LICENSE), [elasticvue](https://github.com/cars10/elasticvue/blob/master/LICENSE), [glutton](https://github.com/mushorg/glutton/blob/master/LICENSE), [hellpot](https://github.com/yunginnanet/HellPot/blob/master/LICENSE), [maltrail](https://github.com/stamparm/maltrail/blob/master/LICENSE)
<br> Unlicense: [endlessh](https://github.com/skeeto/endlessh/blob/master/UNLICENSE)
<br> Other: [citrixhoneypot](https://github.com/MalwareTech/CitrixHoneypot#licencing-agreement-malwaretech-public-licence), [cowrie](https://github.com/cowrie/cowrie/blob/master/LICENSE.rst), [mailoney](https://github.com/awhitehatter/mailoney), [Elastic License](https://www.elastic.co/licensing/elastic-license), [Wordpot](https://github.com/gbrindisi/wordpot)
<br> AGPL-3.0: [honeypots](https://github.com/qeeqbox/honeypots/blob/main/LICENSE)
<br> [Public Domain (CC)](https://creativecommons.org/publicdomain/zero/1.0/): [Harvard Dataverse](https://dataverse.harvard.edu/dataverse/harvard/?q=dicom)
<br><br>
# Credits
Without open source and the development community we are proud to be a part of, T-Pot would not have been possible! Our thanks are extended but not limited to the following people and organizations:
### The developers and development communities of
* [adbhoney](https://github.com/huuck/ADBHoney/graphs/contributors)
* [ciscoasa](https://github.com/Cymmetria/ciscoasa_honeypot/graphs/contributors)
* [citrixhoneypot](https://github.com/MalwareTech/CitrixHoneypot/graphs/contributors)
* [conpot](https://github.com/mushorg/conpot/graphs/contributors)
* [cowrie](https://github.com/cowrie/cowrie/graphs/contributors)
* [ddospot](https://github.com/aelth/ddospot/graphs/contributors)
* [dicompot](https://github.com/nsmfoo/dicompot/graphs/contributors)
* [dionaea](https://github.com/DinoTools/dionaea/graphs/contributors)
* [docker](https://github.com/docker/docker/graphs/contributors)
* [elasticpot](https://gitlab.com/bontchev/elasticpot/-/project_members)
* [elasticsearch](https://github.com/elastic/elasticsearch/graphs/contributors)
* [elasticvue](https://github.com/cars10/elasticvue/graphs/contributors)
* [endlessh](https://github.com/skeeto/endlessh/graphs/contributors)
* [ewsposter](https://github.com/armedpot/ewsposter/graphs/contributors)
* [fatt](https://github.com/0x4D31/fatt/graphs/contributors)
* [glutton](https://github.com/mushorg/glutton/graphs/contributors)
* [hellpot](https://github.com/yunginnanet/HellPot/graphs/contributors)
* [heralding](https://github.com/johnnykv/heralding/graphs/contributors)
* [honeypots](https://github.com/qeeqbox/honeypots/graphs/contributors)
* [honeytrap](https://github.com/armedpot/honeytrap/graphs/contributors)
* [ipphoney](https://gitlab.com/bontchev/ipphoney/-/project_members)
* [kibana](https://github.com/elastic/kibana/graphs/contributors)
* [logstash](https://github.com/elastic/logstash/graphs/contributors)
* [log4pot](https://github.com/thomaspatzke/Log4Pot/graphs/contributors)
* [mailoney](https://github.com/awhitehatter/mailoney)
* [maltrail](https://github.com/stamparm/maltrail/graphs/contributors)
* [medpot](https://github.com/schmalle/medpot/graphs/contributors)
* [p0f](http://lcamtuf.coredump.cx/p0f3/)
* [redishoneypot](https://github.com/cypwnpwnsocute/RedisHoneyPot/graphs/contributors)
* [sentrypeer](https://github.com/SentryPeer/SentryPeer/graphs/contributors)
* [spiderfoot](https://github.com/smicallef/spiderfoot)
* [snare](https://github.com/mushorg/snare/graphs/contributors)
* [tanner](https://github.com/mushorg/tanner/graphs/contributors)
* [suricata](https://github.com/inliniac/suricata/graphs/contributors)
* [wordpot](https://github.com/gbrindisi/wordpot)
**The following companies and organizations**
* [docker](https://www.docker.com/)
* [elastic.io](https://www.elastic.co/)
* [honeynet project](https://www.honeynet.org/)
**... and of course ***you*** for joining the community!**
<br><br>
## The developers and development communities of
* [adbhoney](https://github.com/huuck/ADBHoney/graphs/contributors),
[beelzebub](https://github.com/mariocandela/beelzebub/graphs/contributors),
[ciscoasa](https://github.com/Cymmetria/ciscoasa_honeypot/graphs/contributors),
[citrixhoneypot](https://github.com/MalwareTech/CitrixHoneypot/graphs/contributors),
[conpot](https://github.com/mushorg/conpot/graphs/contributors),
[cowrie](https://github.com/cowrie/cowrie/graphs/contributors),
[ddospot](https://github.com/aelth/ddospot/graphs/contributors),
[dicompot](https://github.com/nsmfoo/dicompot/graphs/contributors),
[dionaea](https://github.com/DinoTools/dionaea/graphs/contributors),
[docker](https://github.com/docker/docker/graphs/contributors),
[elasticpot](https://gitlab.com/bontchev/elasticpot/-/project_members),
[elasticsearch](https://github.com/elastic/elasticsearch/graphs/contributors),
[elasticvue](https://github.com/cars10/elasticvue/graphs/contributors),
[endlessh](https://github.com/skeeto/endlessh/graphs/contributors),
[ewsposter](https://github.com/armedpot/ewsposter/graphs/contributors),
[fatt](https://github.com/0x4D31/fatt/graphs/contributors),
[galah](https://github.com/0x4D31/galah/graphs/contributors),
[glutton](https://github.com/mushorg/glutton/graphs/contributors),
[go-pot](https://github.com/ryanolee/go-pot/graphs/contributors),
[h0neytr4p](https://github.com/pbssubhash/h0neytr4p/graphs/contributors),
[hellpot](https://github.com/yunginnanet/HellPot/graphs/contributors),
[heralding](https://github.com/johnnykv/heralding/graphs/contributors),
[honeyaml](https://github.com/mmta/honeyaml/graphs/contributors),
[honeypots](https://github.com/qeeqbox/honeypots/graphs/contributors),
[honeytrap](https://github.com/armedpot/honeytrap/graphs/contributors),
[ipphoney](https://gitlab.com/bontchev/ipphoney/-/project_members),
[kibana](https://github.com/elastic/kibana/graphs/contributors),
[logstash](https://github.com/elastic/logstash/graphs/contributors),
[log4pot](https://github.com/thomaspatzke/Log4Pot/graphs/contributors),
[mailoney](https://github.com/awhitehatter/mailoney),
[maltrail](https://github.com/stamparm/maltrail/graphs/contributors),
[medpot](https://github.com/schmalle/medpot/graphs/contributors),
[miniprint](https://github.com/sa7mon/miniprint/graphs/contributors),
[p0f](http://lcamtuf.coredump.cx/p0f3/),
[redishoneypot](https://github.com/cypwnpwnsocute/RedisHoneyPot/graphs/contributors),
[sentrypeer](https://github.com/SentryPeer/SentryPeer/graphs/contributors),
[spiderfoot](https://github.com/smicallef/spiderfoot),
[snare](https://github.com/mushorg/snare/graphs/contributors),
[tanner](https://github.com/mushorg/tanner/graphs/contributors),
[suricata](https://github.com/OISF/suricata/graphs/contributors),
[wordpot](https://github.com/gbrindisi/wordpot)
<br><br>
## **The following companies and organizations**
* [docker](https://www.docker.com/),
[elastic.io](https://www.elastic.co/),
[honeynet project](https://www.honeynet.org/)
<br><br>
## **And of course ***YOU*** for joining the community!**
<br>
Thank you for playing 💖
# Testimonials
One of the greatest pieces of feedback we have received so far is from one of the Conpot developers:<br>
***"[...] I highly recommend T-Pot which is ... it's not exactly a swiss army knife .. it's more like a swiss army soldier, equipped with a swiss army knife. Inside a tank. A swiss tank. [...]"***
<br><br>
And from @robcowart (creator of [ElastiFlow](https://github.com/robcowart/elastiflow)):<br>
***"#TPot is one of the most well put together turnkey honeypot solutions. It is a must-have for anyone wanting to analyze and understand the behavior of malicious actors and the threat they pose to your organization."***
<br><br>
**Thank you!**
# Thank you 💖
![Alt](https://repobeats.axiom.co/api/embed/642a1032ac85996c81b12cf9f6393103058b8a04.svg "Repobeats analytics image")

View file

@ -3,8 +3,8 @@
## Supported Versions
| Version | Supported |
| ------- | ------------------ |
| 24.04.1 | :white_check_mark: |
|-------|--------------------|
| 24.04 | :white_check_mark: |
## Reporting a Vulnerability
@ -13,7 +13,7 @@ We prioritize the security of T-Pot highly. Often, vulnerabilities in T-Pot comp
Please follow these steps before reporting a potential vulnerability:
1. Verify that the behavior you've observed isn't already documented as a normal aspect or unrelated issue of T-Pot. For example, Cowrie may initiate outgoing connections, or T-Pot might open all possible TCP ports—a feature enabled by Honeytrap.
1. Verify that the behavior you've observed isn't already documented as a normal aspect or unrelated issue of T-Pot. For example, Cowrie may initiate outgoing connections, or T-Pot might open all possible TCP ports—a feature enabled by Honeytrap.
2. Clearly identify which component is vulnerable (e.g., a specific honeypot, Docker image, tool, package) and isolate the issue.
3. Provide a detailed description of the issue, including log and, if available, debug files. Include all steps necessary to reproduce the vulnerability. If you have a proposed solution, hotfix, or patch, please be prepared to submit a pull request (PR).
4. Check whether the vulnerability is already known upstream. If there is an existing fix or patch, include that information in your report.

View file

@ -9,10 +9,10 @@ version = \
___) | __/ | \ V /| | (_| __/ | |_) | |_| | | | (_| | __/ |
|____/ \___|_| \_/ |_|\___\___| |____/ \__,_|_|_|\__,_|\___|_| v0.21
# This script is intended for users who want to build a customized docker-compose.yml for T-Pot.
# This script is intended for users who want to build a customized docker-compose.yml for T-Pot.
# T-Pot Service Builder will ask for all the docker services to be included in docker-compose.yml.
# The configuration file will be checked for conflicting ports.
# Port conflicts have to be resolved manually or re-running the script and excluding the conflicting services.
# Port conflicts have to be resolved manually or by re-running the script and excluding the conflicting services.
# Review the resulting docker-compose-custom.yml and adjust to your needs by (un)commenting the corresponding lines in the config.
"""
@ -157,6 +157,7 @@ def main():
remove_unused_networks(selected_services, services, networks)
output_config = {
'version': '3.9',
'networks': networks,
'services': selected_services,
}

View file

@ -1,350 +0,0 @@
# T-Pot: LLM
networks:
beelzebub_local:
galah_local:
nginx_local:
ewsposter_local:
services:
#########################################
#### DEV
#########################################
#### T-Pot Init - Never delete this!
#########################################
# T-Pot Init Service
tpotinit:
container_name: tpotinit
env_file:
- .env
restart: always
stop_grace_period: 60s
tmpfs:
- /tmp/etc:uid=2000,gid=2000
- /tmp/:uid=2000,gid=2000
network_mode: "host"
cap_add:
- NET_ADMIN
image: ${TPOT_REPO}/tpotinit:${TPOT_VERSION}
pull_policy: ${TPOT_PULL_POLICY}
volumes:
- ${TPOT_DOCKER_COMPOSE}:/tmp/tpot/docker-compose.yml:ro
- ${TPOT_DATA_PATH}/blackhole:/etc/blackhole
- ${TPOT_DATA_PATH}:/data
- /var/run/docker.sock:/var/run/docker.sock:ro
##################
#### Honeypots
##################
# Beelzebub service
beelzebub:
container_name: beelzebub
restart: always
depends_on:
tpotinit:
condition: service_healthy
networks:
- beelzebub_local
ports:
- "22:22"
# - "80:80"
# - "2222:2222"
# - "3306:3306"
# - "8080:8080"
image: ${TPOT_REPO}/beelzebub:${TPOT_VERSION}
pull_policy: ${TPOT_PULL_POLICY}
environment:
LLM_MODEL: ${BEELZEBUB_LLM_MODEL}
LLM_HOST: ${BEELZEBUB_LLM_HOST}
OLLAMA_MODEL: ${BEELZEBUB_OLLAMA_MODEL}
read_only: true
volumes:
- ${TPOT_DATA_PATH}/beelzebub/key:/opt/beelzebub/configurations/key
- ${TPOT_DATA_PATH}/beelzebub/log:/opt/beelzebub/configurations/log
# Galah service
galah:
container_name: galah
restart: always
depends_on:
tpotinit:
condition: service_healthy
networks:
- galah_local
ports:
- "80:80"
- "443:443"
- "8443:8443"
- "8080:8080"
image: ${TPOT_REPO}/galah:${TPOT_VERSION}
pull_policy: ${TPOT_PULL_POLICY}
environment:
LLM_PROVIDER: ${GALAH_LLM_PROVIDER}
LLM_SERVER_URL: ${GALAH_LLM_SERVER_URL}
LLM_MODEL: ${GALAH_LLM_MODEL}
# LLM_TEMPERATURE: ${GALAH_LLM_TEMPERATURE}
# LLM_API_KEY: ${GALAH_LLM_API_KEY}
# LLM_CLOUD_LOCATION: ${GALAH_LLM_CLOUD_LOCATION}
# LLM_CLOUD_PROJECT: ${GALAH_LLM_CLOUD_PROJECT}
read_only: true
volumes:
- ${TPOT_DATA_PATH}/galah/cache:/opt/galah/config/cache
- ${TPOT_DATA_PATH}/galah/cert:/opt/galah/config/cert
- ${TPOT_DATA_PATH}/galah/log:/opt/galah/log
##################
#### NSM
##################
# Fatt service
fatt:
container_name: fatt
restart: always
depends_on:
tpotinit:
condition: service_healthy
network_mode: "host"
cap_add:
- NET_ADMIN
- SYS_NICE
- NET_RAW
image: ${TPOT_REPO}/fatt:${TPOT_VERSION}
pull_policy: ${TPOT_PULL_POLICY}
volumes:
- ${TPOT_DATA_PATH}/fatt/log:/opt/fatt/log
# P0f service
p0f:
container_name: p0f
restart: always
depends_on:
tpotinit:
condition: service_healthy
network_mode: "host"
image: ${TPOT_REPO}/p0f:${TPOT_VERSION}
pull_policy: ${TPOT_PULL_POLICY}
read_only: true
volumes:
- ${TPOT_DATA_PATH}/p0f/log:/var/log/p0f
# Suricata service
suricata:
container_name: suricata
restart: always
depends_on:
tpotinit:
condition: service_healthy
environment:
- OINKCODE=${OINKCODE:-OPEN} # Default to OPEN if unset or NULL (value provided by T-Pot .env)
# Loading external Rules from URL
# - FROMURL="https://username:password@yoururl.com|https://username:password@otherurl.com"
network_mode: "host"
cap_add:
- NET_ADMIN
- SYS_NICE
- NET_RAW
image: ${TPOT_REPO}/suricata:${TPOT_VERSION}
pull_policy: ${TPOT_PULL_POLICY}
volumes:
- ${TPOT_DATA_PATH}/suricata/log:/var/log/suricata
##################
#### Tools
##################
#### ELK
## Elasticsearch service
elasticsearch:
container_name: elasticsearch
restart: always
depends_on:
tpotinit:
condition: service_healthy
networks:
- nginx_local
environment:
- bootstrap.memory_lock=true
- ES_JAVA_OPTS=-Xms2048m -Xmx2048m
- ES_TMPDIR=/tmp
cap_add:
- IPC_LOCK
ulimits:
memlock:
soft: -1
hard: -1
nofile:
soft: 65536
hard: 65536
mem_limit: 4g
ports:
- "127.0.0.1:64298:9200"
image: ${TPOT_REPO}/elasticsearch:${TPOT_VERSION}
pull_policy: ${TPOT_PULL_POLICY}
volumes:
- ${TPOT_DATA_PATH}:/data
## Kibana service
kibana:
container_name: kibana
restart: always
depends_on:
elasticsearch:
condition: service_healthy
networks:
- nginx_local
mem_limit: 1g
ports:
- "127.0.0.1:64296:5601"
image: ${TPOT_REPO}/kibana:${TPOT_VERSION}
pull_policy: ${TPOT_PULL_POLICY}
## Logstash service
logstash:
container_name: logstash
restart: always
depends_on:
elasticsearch:
condition: service_healthy
networks:
- nginx_local
environment:
- LS_JAVA_OPTS=-Xms1024m -Xmx1024m
- TPOT_TYPE=${TPOT_TYPE:-HIVE}
- TPOT_HIVE_USER=${TPOT_HIVE_USER}
- TPOT_HIVE_IP=${TPOT_HIVE_IP}
- LS_SSL_VERIFICATION=${LS_SSL_VERIFICATION:-full}
ports:
- "127.0.0.1:64305:64305"
mem_limit: 2g
image: ${TPOT_REPO}/logstash:${TPOT_VERSION}
pull_policy: ${TPOT_PULL_POLICY}
volumes:
- ${TPOT_DATA_PATH}:/data
## Map Redis Service
map_redis:
container_name: map_redis
restart: always
depends_on:
tpotinit:
condition: service_healthy
networks:
- nginx_local
stop_signal: SIGKILL
tty: true
image: ${TPOT_REPO}/redis:${TPOT_VERSION}
pull_policy: ${TPOT_PULL_POLICY}
read_only: true
## Map Web Service
map_web:
container_name: map_web
restart: always
depends_on:
tpotinit:
condition: service_healthy
networks:
- nginx_local
environment:
- MAP_COMMAND=AttackMapServer.py
stop_signal: SIGKILL
tty: true
ports:
- "127.0.0.1:64299:64299"
image: ${TPOT_REPO}/map:${TPOT_VERSION}
pull_policy: ${TPOT_PULL_POLICY}
## Map Data Service
map_data:
container_name: map_data
restart: always
depends_on:
elasticsearch:
condition: service_healthy
networks:
- nginx_local
environment:
- MAP_COMMAND=DataServer_v2.py
- TPOT_ATTACKMAP_TEXT=${TPOT_ATTACKMAP_TEXT}
- TZ=${TPOT_ATTACKMAP_TEXT_TIMEZONE}
stop_signal: SIGKILL
tty: true
image: ${TPOT_REPO}/map:${TPOT_VERSION}
pull_policy: ${TPOT_PULL_POLICY}
#### /ELK
# Ewsposter service
ewsposter:
container_name: ewsposter
restart: always
depends_on:
tpotinit:
condition: service_healthy
networks:
- ewsposter_local
environment:
- EWS_HPFEEDS_ENABLE=false
- EWS_HPFEEDS_HOST=host
- EWS_HPFEEDS_PORT=port
- EWS_HPFEEDS_CHANNELS=channels
- EWS_HPFEEDS_IDENT=user
- EWS_HPFEEDS_SECRET=secret
- EWS_HPFEEDS_TLSCERT=false
- EWS_HPFEEDS_FORMAT=json
image: ${TPOT_REPO}/ewsposter:${TPOT_VERSION}
pull_policy: ${TPOT_PULL_POLICY}
volumes:
- ${TPOT_DATA_PATH}:/data
- ${TPOT_DATA_PATH}/ews/conf/ews.ip:/opt/ewsposter/ews.ip
# Nginx service
nginx:
container_name: nginx
restart: always
environment:
- TPOT_OSTYPE=${TPOT_OSTYPE}
depends_on:
tpotinit:
condition: service_healthy
tmpfs:
- /var/tmp/nginx/client_body
- /var/tmp/nginx/proxy
- /var/tmp/nginx/fastcgi
- /var/tmp/nginx/uwsgi
- /var/tmp/nginx/scgi
- /run
- /var/lib/nginx/tmp:uid=100,gid=82
networks:
- nginx_local
ports:
- "64297:64297"
- "64294:64294"
image: ${TPOT_REPO}/nginx:${TPOT_VERSION}
pull_policy: ${TPOT_PULL_POLICY}
read_only: true
volumes:
- ${TPOT_DATA_PATH}/nginx/cert/:/etc/nginx/cert/:ro
- ${TPOT_DATA_PATH}/nginx/conf/nginxpasswd:/etc/nginx/nginxpasswd:ro
- ${TPOT_DATA_PATH}/nginx/conf/lswebpasswd:/etc/nginx/lswebpasswd:ro
- ${TPOT_DATA_PATH}/nginx/log/:/var/log/nginx/
# Spiderfoot service
spiderfoot:
container_name: spiderfoot
restart: always
depends_on:
tpotinit:
condition: service_healthy
networks:
- nginx_local
ports:
- "127.0.0.1:64303:8080"
image: ${TPOT_REPO}/spiderfoot:${TPOT_VERSION}
pull_policy: ${TPOT_PULL_POLICY}
volumes:
- ${TPOT_DATA_PATH}/spiderfoot:/home/spiderfoot/.spiderfoot

View file

@ -1,22 +1,26 @@
# T-Pot: MAC_WIN
version: '3.9'
networks:
tpotinit_local:
adbhoney_local:
ciscoasa_local:
citrixhoneypot_local:
conpot_local_IEC104:
conpot_local_guardian_ast:
conpot_local_ipmi:
conpot_local_kamstrup_382:
cowrie_local:
ddospot_local:
dicompot_local:
dionaea_local:
elasticpot_local:
h0neytr4p_local:
heralding_local:
honeyaml_local:
ipphoney_local:
mailoney_local:
medpot_local:
miniprint_local:
redishoneypot_local:
sentrypeer_local:
suricata_local:
tanner_local:
wordpot_local:
nginx_local:
@ -48,7 +52,6 @@ services:
- ${TPOT_DOCKER_COMPOSE}:/tmp/tpot/docker-compose.yml:ro
- ${TPOT_DATA_PATH}/blackhole:/etc/blackhole
- ${TPOT_DATA_PATH}:/data
- /var/run/docker.sock:/var/run/docker.sock:ro
##################
@ -93,6 +96,125 @@ services:
volumes:
- ${TPOT_DATA_PATH}/ciscoasa/log:/var/log/ciscoasa
# CitrixHoneypot service
citrixhoneypot:
container_name: citrixhoneypot
restart: always
depends_on:
tpotinit:
condition: service_healthy
networks:
- citrixhoneypot_local
ports:
- "443:443"
image: ${TPOT_REPO}/citrixhoneypot:${TPOT_VERSION}
pull_policy: ${TPOT_PULL_POLICY}
read_only: true
volumes:
- ${TPOT_DATA_PATH}/citrixhoneypot/log:/opt/citrixhoneypot/logs
# Conpot IEC104 service
conpot_IEC104:
container_name: conpot_iec104
restart: always
depends_on:
tpotinit:
condition: service_healthy
environment:
- CONPOT_CONFIG=/etc/conpot/conpot.cfg
- CONPOT_JSON_LOG=/var/log/conpot/conpot_IEC104.json
- CONPOT_LOG=/var/log/conpot/conpot_IEC104.log
- CONPOT_TEMPLATE=IEC104
- CONPOT_TMP=/tmp/conpot
tmpfs:
- /tmp/conpot:uid=2000,gid=2000
networks:
- conpot_local_IEC104
ports:
- "161:161/udp"
- "2404:2404"
image: ${TPOT_REPO}/conpot:${TPOT_VERSION}
pull_policy: ${TPOT_PULL_POLICY}
read_only: true
volumes:
- ${TPOT_DATA_PATH}/conpot/log:/var/log/conpot
# Conpot guardian_ast service
conpot_guardian_ast:
container_name: conpot_guardian_ast
restart: always
depends_on:
tpotinit:
condition: service_healthy
environment:
- CONPOT_CONFIG=/etc/conpot/conpot.cfg
- CONPOT_JSON_LOG=/var/log/conpot/conpot_guardian_ast.json
- CONPOT_LOG=/var/log/conpot/conpot_guardian_ast.log
- CONPOT_TEMPLATE=guardian_ast
- CONPOT_TMP=/tmp/conpot
tmpfs:
- /tmp/conpot:uid=2000,gid=2000
networks:
- conpot_local_guardian_ast
ports:
- "10001:10001"
image: ${TPOT_REPO}/conpot:${TPOT_VERSION}
pull_policy: ${TPOT_PULL_POLICY}
read_only: true
volumes:
- ${TPOT_DATA_PATH}/conpot/log:/var/log/conpot
# Conpot ipmi
conpot_ipmi:
container_name: conpot_ipmi
restart: always
depends_on:
tpotinit:
condition: service_healthy
environment:
- CONPOT_CONFIG=/etc/conpot/conpot.cfg
- CONPOT_JSON_LOG=/var/log/conpot/conpot_ipmi.json
- CONPOT_LOG=/var/log/conpot/conpot_ipmi.log
- CONPOT_TEMPLATE=ipmi
- CONPOT_TMP=/tmp/conpot
tmpfs:
- /tmp/conpot:uid=2000,gid=2000
networks:
- conpot_local_ipmi
ports:
- "623:623/udp"
image: ${TPOT_REPO}/conpot:${TPOT_VERSION}
pull_policy: ${TPOT_PULL_POLICY}
read_only: true
volumes:
- ${TPOT_DATA_PATH}/conpot/log:/var/log/conpot
# Conpot kamstrup_382
conpot_kamstrup_382:
container_name: conpot_kamstrup_382
restart: always
depends_on:
tpotinit:
condition: service_healthy
environment:
- CONPOT_CONFIG=/etc/conpot/conpot.cfg
- CONPOT_JSON_LOG=/var/log/conpot/conpot_kamstrup_382.json
- CONPOT_LOG=/var/log/conpot/conpot_kamstrup_382.log
- CONPOT_TEMPLATE=kamstrup_382
- CONPOT_TMP=/tmp/conpot
tmpfs:
- /tmp/conpot:uid=2000,gid=2000
networks:
- conpot_local_kamstrup_382
ports:
- "1025:1025"
- "50100:50100"
image: ${TPOT_REPO}/conpot:${TPOT_VERSION}
pull_policy: ${TPOT_PULL_POLICY}
read_only: true
volumes:
- ${TPOT_DATA_PATH}/conpot/log:/var/log/conpot
# Cowrie service
cowrie:
container_name: cowrie
@ -117,6 +239,29 @@ services:
- ${TPOT_DATA_PATH}/cowrie/log:/home/cowrie/cowrie/log
- ${TPOT_DATA_PATH}/cowrie/log/tty:/home/cowrie/cowrie/log/tty
# Ddospot service
ddospot:
container_name: ddospot
restart: always
depends_on:
tpotinit:
condition: service_healthy
networks:
- ddospot_local
ports:
- "19:19/udp"
- "53:53/udp"
- "123:123/udp"
# - "161:161/udp"
- "1900:1900/udp"
image: ${TPOT_REPO}/ddospot:${TPOT_VERSION}
pull_policy: ${TPOT_PULL_POLICY}
read_only: true
volumes:
- ${TPOT_DATA_PATH}/ddospot/log:/opt/ddospot/ddospot/logs
- ${TPOT_DATA_PATH}/ddospot/bl:/opt/ddospot/ddospot/bl
- ${TPOT_DATA_PATH}/ddospot/db:/opt/ddospot/ddospot/db
# Dicompot service
# Get the Horos Client for testing: https://horosproject.org/
# Get Dicom images (CC BY 3.0): https://www.cancerimagingarchive.net/collections/
@ -130,7 +275,6 @@ services:
networks:
- dicompot_local
ports:
- "104:11112"
- "11112:11112"
image: ${TPOT_REPO}/dicompot:${TPOT_VERSION}
pull_policy: ${TPOT_PULL_POLICY}
@ -158,7 +302,7 @@ services:
- "81:81"
- "135:135"
# - "443:443"
# - "445:445"
- "445:445"
- "1433:1433"
- "1723:1723"
- "1883:1883"
@ -197,25 +341,6 @@ services:
volumes:
- ${TPOT_DATA_PATH}/elasticpot/log:/opt/elasticpot/log
# H0neytr4p service
h0neytr4p:
container_name: h0neytr4p
restart: always
depends_on:
tpotinit:
condition: service_healthy
networks:
- h0neytr4p_local
ports:
- "443:443"
# - "80:80"
image: ${TPOT_REPO}/h0neytr4p:${TPOT_VERSION}
pull_policy: ${TPOT_PULL_POLICY}
read_only: true
volumes:
- ${TPOT_DATA_PATH}/h0neytr4p/log/:/opt/h0neytr4p/log/
- ${TPOT_DATA_PATH}/h0neytr4p/payloads/:/data/h0neytr4p/payloads/
# Heralding service
heralding:
container_name: heralding
@ -250,23 +375,6 @@ services:
volumes:
- ${TPOT_DATA_PATH}/heralding/log:/var/log/heralding
# Honeyaml service
honeyaml:
container_name: honeyaml
restart: always
depends_on:
tpotinit:
condition: service_healthy
networks:
- honeyaml_local
ports:
- "3000:8080"
image: ${TPOT_REPO}/honeyaml:${TPOT_VERSION}
pull_policy: ${TPOT_PULL_POLICY}
read_only: true
volumes:
- ${TPOT_DATA_PATH}/honeyaml/log:/opt/honeyaml/log/
# Ipphoney service
ipphoney:
container_name: ipphoney
@ -287,12 +395,16 @@ services:
# Mailoney service
mailoney:
container_name: mailoney
stdin_open: true
tty: true
restart: always
depends_on:
tpotinit:
condition: service_healthy
environment:
- HPFEEDS_SERVER=
- HPFEEDS_IDENT=user
- HPFEEDS_SECRET=pass
- HPFEEDS_PORT=20000
- HPFEEDS_CHANNELPREFIX=prefix
networks:
- mailoney_local
ports:
@ -321,24 +433,6 @@ services:
volumes:
- ${TPOT_DATA_PATH}/medpot/log/:/var/log/medpot
# Miniprint service
miniprint:
container_name: miniprint
restart: always
depends_on:
tpotinit:
condition: service_healthy
networks:
- miniprint_local
ports:
- "9100:9100"
image: ${TPOT_REPO}/miniprint:${TPOT_VERSION}
pull_policy: ${TPOT_PULL_POLICY}
read_only: true
volumes:
- ${TPOT_DATA_PATH}/miniprint/log/:/opt/miniprint/log/
- ${TPOT_DATA_PATH}/miniprint/uploads/:/opt/miniprint/uploads/
# Redishoneypot service
redishoneypot:
container_name: redishoneypot
@ -471,7 +565,6 @@ services:
ports:
- "8080:80"
image: ${TPOT_REPO}/wordpot:${TPOT_VERSION}
pull_policy: ${TPOT_PULL_POLICY}
read_only: true
volumes:
- ${TPOT_DATA_PATH}/wordpot/log:/opt/wordpot/logs/
@ -523,16 +616,15 @@ services:
depends_on:
tpotinit:
condition: service_healthy
networks:
- suricata_local
cap_add:
- NET_ADMIN
- SYS_NICE
- NET_RAW
environment:
- OINKCODE=${OINKCODE:-OPEN} # Default to OPEN if unset or NULL (value provided by T-Pot .env)
# Loading external Rules from URL
# - FROMURL="https://username:password@yoururl.com|https://username:password@otherurl.com"
network_mode: "host"
cap_add:
- NET_ADMIN
- SYS_NICE
- NET_RAW
image: ${TPOT_REPO}/suricata:${TPOT_VERSION}
pull_policy: ${TPOT_PULL_POLICY}
volumes:
@ -603,7 +695,6 @@ services:
- TPOT_TYPE=${TPOT_TYPE:-HIVE}
- TPOT_HIVE_USER=${TPOT_HIVE_USER}
- TPOT_HIVE_IP=${TPOT_HIVE_IP}
- LS_SSL_VERIFICATION=${LS_SSL_VERIFICATION:-full}
ports:
- "127.0.0.1:64305:64305"
mem_limit: 2g

View file

@ -1,4 +1,6 @@
# T-Pot: MINI
version: '3.9'
networks:
adbhoney_local:
ciscoasa_local:
@ -9,7 +11,7 @@ networks:
dicompot_local:
honeypots_local:
medpot_local:
nginx_local:
spiderfoot_local:
ewsposter_local:
services:
@ -199,7 +201,6 @@ services:
networks:
- dicompot_local
ports:
- "104:11112"
- "11112:11112"
image: ${TPOT_REPO}/dicompot:${TPOT_VERSION}
pull_policy: ${TPOT_PULL_POLICY}
@ -226,8 +227,7 @@ services:
- "22:22"
- "23:23"
- "25:25"
- "53:53"
- "67:67/udp"
- "53:53/udp"
- "80:80"
- "110:110"
- "123:123"
@ -365,8 +365,6 @@ services:
depends_on:
tpotinit:
condition: service_healthy
networks:
- nginx_local
environment:
- bootstrap.memory_lock=true
- ES_JAVA_OPTS=-Xms2048m -Xmx2048m
@ -395,8 +393,6 @@ services:
depends_on:
elasticsearch:
condition: service_healthy
networks:
- nginx_local
mem_limit: 1g
ports:
- "127.0.0.1:64296:5601"
@ -410,14 +406,11 @@ services:
depends_on:
elasticsearch:
condition: service_healthy
networks:
- nginx_local
environment:
- LS_JAVA_OPTS=-Xms1024m -Xmx1024m
- TPOT_TYPE=${TPOT_TYPE:-HIVE}
- TPOT_HIVE_USER=${TPOT_HIVE_USER}
- TPOT_HIVE_IP=${TPOT_HIVE_IP}
- LS_SSL_VERIFICATION=${LS_SSL_VERIFICATION:-full}
ports:
- "127.0.0.1:64305:64305"
mem_limit: 2g
@ -433,8 +426,6 @@ services:
depends_on:
tpotinit:
condition: service_healthy
networks:
- nginx_local
stop_signal: SIGKILL
tty: true
image: ${TPOT_REPO}/redis:${TPOT_VERSION}
@ -448,8 +439,6 @@ services:
depends_on:
tpotinit:
condition: service_healthy
networks:
- nginx_local
environment:
- MAP_COMMAND=AttackMapServer.py
stop_signal: SIGKILL
@ -466,8 +455,6 @@ services:
depends_on:
elasticsearch:
condition: service_healthy
networks:
- nginx_local
environment:
- MAP_COMMAND=DataServer_v2.py
- TPOT_ATTACKMAP_TEXT=${TPOT_ATTACKMAP_TEXT}
@ -519,11 +506,9 @@ services:
- /var/tmp/nginx/scgi
- /run
- /var/lib/nginx/tmp:uid=100,gid=82
networks:
- nginx_local
network_mode: "host"
ports:
- "64297:64297"
- "64294:64294"
image: ${TPOT_REPO}/nginx:${TPOT_VERSION}
pull_policy: ${TPOT_PULL_POLICY}
read_only: true
@ -541,7 +526,7 @@ services:
tpotinit:
condition: service_healthy
networks:
- nginx_local
- spiderfoot_local
ports:
- "127.0.0.1:64303:8080"
image: ${TPOT_REPO}/spiderfoot:${TPOT_VERSION}

View file

@ -3,8 +3,11 @@
# T-Pot on a Raspberry Pi 4 (8GB of RAM).
# The standard docker compose file should work mostly fine (depending on traffic) if you do not enable a
# desktop environment such as LXDE and meet the minimum requirements of 8GB RAM.
version: '3.9'
networks:
ciscoasa_local:
citrixhoneypot_local:
conpot_local_IEC104:
conpot_local_ipmi:
conpot_local_kamstrup_382:
@ -12,14 +15,11 @@ networks:
dicompot_local:
dionaea_local:
elasticpot_local:
h0neytr4p_local:
heralding_local:
honeyaml_local:
ipphoney_local:
log4pot_local:
mailoney_local:
medpot_local:
miniprint_local:
redishoneypot_local:
sentrypeer_local:
tanner_local:
@ -79,6 +79,23 @@ services:
volumes:
- ${TPOT_DATA_PATH}/ciscoasa/log:/var/log/ciscoasa
# CitrixHoneypot service
citrixhoneypot:
container_name: citrixhoneypot
restart: always
depends_on:
logstash:
condition: service_healthy
networks:
- citrixhoneypot_local
ports:
- "443:443"
image: ${TPOT_REPO}/citrixhoneypot:${TPOT_VERSION}
pull_policy: ${TPOT_PULL_POLICY}
read_only: true
volumes:
- ${TPOT_DATA_PATH}/citrixhoneypot/log:/opt/citrixhoneypot/logs
# Conpot IEC104 service
conpot_IEC104:
container_name: conpot_iec104
@ -193,7 +210,6 @@ services:
networks:
- dicompot_local
ports:
- "104:11112"
- "11112:11112"
image: ${TPOT_REPO}/dicompot:${TPOT_VERSION}
pull_policy: ${TPOT_PULL_POLICY}
@ -260,25 +276,6 @@ services:
volumes:
- ${TPOT_DATA_PATH}/elasticpot/log:/opt/elasticpot/log
# H0neytr4p service
h0neytr4p:
container_name: h0neytr4p
restart: always
depends_on:
logstash:
condition: service_healthy
networks:
- h0neytr4p_local
ports:
- "443:443"
# - "80:80"
image: ${TPOT_REPO}/h0neytr4p:${TPOT_VERSION}
pull_policy: ${TPOT_PULL_POLICY}
read_only: true
volumes:
- ${TPOT_DATA_PATH}/h0neytr4p/log/:/opt/h0neytr4p/log/
- ${TPOT_DATA_PATH}/h0neytr4p/payloads/:/data/h0neytr4p/payloads/
# Heralding service
heralding:
container_name: heralding
@ -313,23 +310,6 @@ services:
volumes:
- ${TPOT_DATA_PATH}/heralding/log:/var/log/heralding
# Honeyaml service
honeyaml:
container_name: honeyaml
restart: always
depends_on:
logstash:
condition: service_healthy
networks:
- honeyaml_local
ports:
- "3000:8080"
image: ${TPOT_REPO}/honeyaml:${TPOT_VERSION}
pull_policy: ${TPOT_PULL_POLICY}
read_only: true
volumes:
- ${TPOT_DATA_PATH}/honeyaml/log:/opt/honeyaml/log/
# Honeytrap service
honeytrap:
container_name: honeytrap
@ -367,6 +347,30 @@ services:
volumes:
- ${TPOT_DATA_PATH}/ipphoney/log:/opt/ipphoney/log
# Mailoney service
mailoney:
container_name: mailoney
restart: always
depends_on:
logstash:
condition: service_healthy
environment:
- HPFEEDS_SERVER=
- HPFEEDS_IDENT=user
- HPFEEDS_SECRET=pass
- HPFEEDS_PORT=20000
- HPFEEDS_CHANNELPREFIX=prefix
networks:
- mailoney_local
ports:
- "25:25"
- "587:25"
image: ${TPOT_REPO}/mailoney:${TPOT_VERSION}
pull_policy: ${TPOT_PULL_POLICY}
read_only: true
volumes:
- ${TPOT_DATA_PATH}/mailoney/log:/opt/mailoney/logs
# Log4pot service
log4pot:
container_name: log4pot
@ -381,7 +385,7 @@ services:
ports:
# - "80:8080"
# - "443:8080"
# - "8080:8080"
- "8080:8080"
# - "9200:8080"
- "25565:8080"
image: ${TPOT_REPO}/log4pot:${TPOT_VERSION}
@ -391,26 +395,6 @@ services:
- ${TPOT_DATA_PATH}/log4pot/log:/var/log/log4pot/log
- ${TPOT_DATA_PATH}/log4pot/payloads:/var/log/log4pot/payloads
# Mailoney service
mailoney:
container_name: mailoney
stdin_open: true
tty: true
restart: always
depends_on:
logstash:
condition: service_healthy
networks:
- mailoney_local
ports:
- "25:25"
- "587:25"
image: ${TPOT_REPO}/mailoney:${TPOT_VERSION}
pull_policy: ${TPOT_PULL_POLICY}
read_only: true
volumes:
- ${TPOT_DATA_PATH}/mailoney/log:/opt/mailoney/logs
# Medpot service
medpot:
container_name: medpot
@ -428,24 +412,6 @@ services:
volumes:
- ${TPOT_DATA_PATH}/medpot/log/:/var/log/medpot
# Miniprint service
miniprint:
container_name: miniprint
restart: always
depends_on:
logstash:
condition: service_healthy
networks:
- miniprint_local
ports:
- "9100:9100"
image: ${TPOT_REPO}/miniprint:${TPOT_VERSION}
pull_policy: ${TPOT_PULL_POLICY}
read_only: true
volumes:
- ${TPOT_DATA_PATH}/miniprint/log/:/opt/miniprint/log/
- ${TPOT_DATA_PATH}/miniprint/uploads/:/opt/miniprint/uploads/
# Redishoneypot service
redishoneypot:
container_name: redishoneypot
@ -571,14 +537,13 @@ services:
container_name: wordpot
restart: always
depends_on:
logstash:
tpotinit:
condition: service_healthy
networks:
- wordpot_local
ports:
- "8080:80"
image: ${TPOT_REPO}/wordpot:${TPOT_VERSION}
pull_policy: ${TPOT_PULL_POLICY}
read_only: true
volumes:
- ${TPOT_DATA_PATH}/wordpot/log:/opt/wordpot/logs/
@ -629,7 +594,6 @@ services:
- TPOT_TYPE=${TPOT_TYPE:-HIVE}
- TPOT_HIVE_USER=${TPOT_HIVE_USER}
- TPOT_HIVE_IP=${TPOT_HIVE_IP}
- LS_SSL_VERIFICATION=${LS_SSL_VERIFICATION:-full}
ports:
- "127.0.0.1:64305:64305"
mem_limit: 2g

View file

@ -0,0 +1,628 @@
# T-Pot: MOBILE
# Note: This docker compose file has been adjusted to limit the number of tools, services and honeypots to run
# T-Pot on a Raspberry Pi 4 (8GB of RAM).
# The standard docker compose file should work mostly fine (depending on traffic) if you do not enable a
# desktop environment such as LXDE and meet the minimum requirements of 8GB RAM.
version: '3.9'
networks:
ciscoasa_local:
citrixhoneypot_local:
conpot_local_IEC104:
conpot_local_ipmi:
conpot_local_kamstrup_382:
cowrie_local:
dicompot_local:
dionaea_local:
elasticpot_local:
heralding_local:
ipphoney_local:
log4pot_local:
mailoney_local:
medpot_local:
redishoneypot_local:
sentrypeer_local:
tanner_local:
wordpot_local:
ewsposter_local:
services:
#########################################
#### DEV
#########################################
#### T-Pot Init - Never delete this!
#########################################
# T-Pot Init Service
tpotinit:
container_name: tpotinit
env_file:
- .env
restart: always
stop_grace_period: 60s
tmpfs:
- /tmp/etc:uid=2000,gid=2000
- /tmp/:uid=2000,gid=2000
network_mode: "host"
cap_add:
- NET_ADMIN
image: ${TPOT_REPO}/tpotinit:${TPOT_VERSION}
pull_policy: ${TPOT_PULL_POLICY}
volumes:
- ${TPOT_DOCKER_COMPOSE}:/tmp/tpot/docker-compose.yml:ro
- ${TPOT_DATA_PATH}/blackhole:/etc/blackhole
- ${TPOT_DATA_PATH}:/data
- /var/run/docker.sock:/var/run/docker.sock:ro
##################
#### Honeypots
##################
# Ciscoasa service
ciscoasa:
container_name: ciscoasa
restart: always
depends_on:
logstash:
condition: service_healthy
tmpfs:
- /tmp/ciscoasa:uid=2000,gid=2000
networks:
- ciscoasa_local
ports:
- "5000:5000/udp"
- "8443:8443"
image: ${TPOT_REPO}/ciscoasa:${TPOT_VERSION}
pull_policy: ${TPOT_PULL_POLICY}
read_only: true
volumes:
- ${TPOT_DATA_PATH}/ciscoasa/log:/var/log/ciscoasa
# CitrixHoneypot service
citrixhoneypot:
container_name: citrixhoneypot
restart: always
depends_on:
logstash:
condition: service_healthy
networks:
- citrixhoneypot_local
ports:
- "443:443"
image: ${TPOT_REPO}/citrixhoneypot:${TPOT_VERSION}
pull_policy: ${TPOT_PULL_POLICY}
read_only: true
volumes:
- ${TPOT_DATA_PATH}/citrixhoneypot/log:/opt/citrixhoneypot/logs
# Conpot IEC104 service
conpot_IEC104:
container_name: conpot_iec104
restart: always
depends_on:
logstash:
condition: service_healthy
environment:
- CONPOT_CONFIG=/etc/conpot/conpot.cfg
- CONPOT_JSON_LOG=/var/log/conpot/conpot_IEC104.json
- CONPOT_LOG=/var/log/conpot/conpot_IEC104.log
- CONPOT_TEMPLATE=IEC104
- CONPOT_TMP=/tmp/conpot
tmpfs:
- /tmp/conpot:uid=2000,gid=2000
networks:
- conpot_local_IEC104
ports:
- "161:161/udp"
- "2404:2404"
image: ${TPOT_REPO}/conpot:${TPOT_VERSION}
pull_policy: ${TPOT_PULL_POLICY}
read_only: true
volumes:
- ${TPOT_DATA_PATH}/conpot/log:/var/log/conpot
# Conpot ipmi
conpot_ipmi:
container_name: conpot_ipmi
restart: always
depends_on:
logstash:
condition: service_healthy
environment:
- CONPOT_CONFIG=/etc/conpot/conpot.cfg
- CONPOT_JSON_LOG=/var/log/conpot/conpot_ipmi.json
- CONPOT_LOG=/var/log/conpot/conpot_ipmi.log
- CONPOT_TEMPLATE=ipmi
- CONPOT_TMP=/tmp/conpot
tmpfs:
- /tmp/conpot:uid=2000,gid=2000
networks:
- conpot_local_ipmi
ports:
- "623:623/udp"
image: ${TPOT_REPO}/conpot:${TPOT_VERSION}
pull_policy: ${TPOT_PULL_POLICY}
read_only: true
volumes:
- ${TPOT_DATA_PATH}/conpot/log:/var/log/conpot
# Conpot kamstrup_382
conpot_kamstrup_382:
container_name: conpot_kamstrup_382
restart: always
depends_on:
logstash:
condition: service_healthy
environment:
- CONPOT_CONFIG=/etc/conpot/conpot.cfg
- CONPOT_JSON_LOG=/var/log/conpot/conpot_kamstrup_382.json
- CONPOT_LOG=/var/log/conpot/conpot_kamstrup_382.log
- CONPOT_TEMPLATE=kamstrup_382
- CONPOT_TMP=/tmp/conpot
tmpfs:
- /tmp/conpot:uid=2000,gid=2000
networks:
- conpot_local_kamstrup_382
ports:
- "1025:1025"
- "50100:50100"
image: ${TPOT_REPO}/conpot:${TPOT_VERSION}
pull_policy: ${TPOT_PULL_POLICY}
read_only: true
volumes:
- ${TPOT_DATA_PATH}/conpot/log:/var/log/conpot
# Cowrie service
cowrie:
container_name: cowrie
restart: always
depends_on:
logstash:
condition: service_healthy
tmpfs:
- /tmp/cowrie:uid=2000,gid=2000
- /tmp/cowrie/data:uid=2000,gid=2000
networks:
- cowrie_local
ports:
- "22:22"
- "23:23"
image: ${TPOT_REPO}/cowrie:${TPOT_VERSION}
pull_policy: ${TPOT_PULL_POLICY}
read_only: true
volumes:
- ${TPOT_DATA_PATH}/cowrie/downloads:/home/cowrie/cowrie/dl
- ${TPOT_DATA_PATH}/cowrie/keys:/home/cowrie/cowrie/etc
- ${TPOT_DATA_PATH}/cowrie/log:/home/cowrie/cowrie/log
- ${TPOT_DATA_PATH}/cowrie/log/tty:/home/cowrie/cowrie/log/tty
# Dicompot service
# Get the Horos client for testing: https://horosproject.org/
# Get DICOM images (CC BY 3.0): https://www.cancerimagingarchive.net/collections/
# Put images into /data/dicompot/images — they must be in DICOM (.dcm) format, otherwise dicompot will not serve them!
dicompot:
container_name: dicompot
restart: always
depends_on:
logstash:
condition: service_healthy
networks:
- dicompot_local
ports:
- "11112:11112"
image: ${TPOT_REPO}/dicompot:${TPOT_VERSION}
pull_policy: ${TPOT_PULL_POLICY}
read_only: true
volumes:
- ${TPOT_DATA_PATH}/dicompot/log:/var/log/dicompot
# - ${TPOT_DATA_PATH}/dicompot/images:/opt/dicompot/images
# Dionaea service
dionaea:
container_name: dionaea
stdin_open: true
tty: true
restart: always
depends_on:
logstash:
condition: service_healthy
networks:
- dionaea_local
ports:
- "20:20"
- "21:21"
- "42:42"
- "69:69/udp"
- "81:81"
- "135:135"
# - "443:443"
- "445:445"
- "1433:1433"
- "1723:1723"
- "1883:1883"
- "3306:3306"
# - "5060:5060"
# - "5060:5060/udp"
# - "5061:5061"
- "27017:27017"
image: ${TPOT_REPO}/dionaea:${TPOT_VERSION}
pull_policy: ${TPOT_PULL_POLICY}
read_only: true
volumes:
- ${TPOT_DATA_PATH}/dionaea/roots/ftp:/opt/dionaea/var/dionaea/roots/ftp
- ${TPOT_DATA_PATH}/dionaea/roots/tftp:/opt/dionaea/var/dionaea/roots/tftp
- ${TPOT_DATA_PATH}/dionaea/roots/www:/opt/dionaea/var/dionaea/roots/www
- ${TPOT_DATA_PATH}/dionaea/roots/upnp:/opt/dionaea/var/dionaea/roots/upnp
- ${TPOT_DATA_PATH}/dionaea:/opt/dionaea/var/dionaea
- ${TPOT_DATA_PATH}/dionaea/binaries:/opt/dionaea/var/dionaea/binaries
- ${TPOT_DATA_PATH}/dionaea/log:/opt/dionaea/var/log
- ${TPOT_DATA_PATH}/dionaea/rtp:/opt/dionaea/var/dionaea/rtp
# ElasticPot service
elasticpot:
container_name: elasticpot
restart: always
depends_on:
logstash:
condition: service_healthy
networks:
- elasticpot_local
ports:
- "9200:9200"
image: ${TPOT_REPO}/elasticpot:${TPOT_VERSION}
pull_policy: ${TPOT_PULL_POLICY}
read_only: true
volumes:
- ${TPOT_DATA_PATH}/elasticpot/log:/opt/elasticpot/log
# Heralding service
heralding:
container_name: heralding
restart: always
depends_on:
logstash:
condition: service_healthy
tmpfs:
- /tmp/heralding:uid=2000,gid=2000
networks:
- heralding_local
ports:
# - "21:21"
# - "22:22"
# - "23:23"
# - "25:25"
# - "80:80"
- "110:110"
- "143:143"
# - "443:443"
- "465:465"
- "993:993"
- "995:995"
# - "3306:3306"
# - "3389:3389"
- "1080:1080"
- "5432:5432"
- "5900:5900"
image: ${TPOT_REPO}/heralding:${TPOT_VERSION}
pull_policy: ${TPOT_PULL_POLICY}
read_only: true
volumes:
- ${TPOT_DATA_PATH}/heralding/log:/var/log/heralding
# Honeytrap service
honeytrap:
container_name: honeytrap
restart: always
depends_on:
logstash:
condition: service_healthy
tmpfs:
- /tmp/honeytrap:uid=2000,gid=2000
network_mode: "host"
cap_add:
- NET_ADMIN
image: ${TPOT_REPO}/honeytrap:${TPOT_VERSION}
pull_policy: ${TPOT_PULL_POLICY}
read_only: true
volumes:
- ${TPOT_DATA_PATH}/honeytrap/attacks:/opt/honeytrap/var/attacks
- ${TPOT_DATA_PATH}/honeytrap/downloads:/opt/honeytrap/var/downloads
- ${TPOT_DATA_PATH}/honeytrap/log:/opt/honeytrap/var/log
# Ipphoney service
ipphoney:
container_name: ipphoney
restart: always
depends_on:
logstash:
condition: service_healthy
networks:
- ipphoney_local
ports:
- "631:631"
image: ${TPOT_REPO}/ipphoney:${TPOT_VERSION}
pull_policy: ${TPOT_PULL_POLICY}
read_only: true
volumes:
- ${TPOT_DATA_PATH}/ipphoney/log:/opt/ipphoney/log
# Mailoney service
mailoney:
container_name: mailoney
restart: always
depends_on:
logstash:
condition: service_healthy
environment:
- HPFEEDS_SERVER=
- HPFEEDS_IDENT=user
- HPFEEDS_SECRET=pass
- HPFEEDS_PORT=20000
- HPFEEDS_CHANNELPREFIX=prefix
networks:
- mailoney_local
ports:
- "25:25"
- "587:25"
image: ${TPOT_REPO}/mailoney:${TPOT_VERSION}
pull_policy: ${TPOT_PULL_POLICY}
read_only: true
volumes:
- ${TPOT_DATA_PATH}/mailoney/log:/opt/mailoney/logs
# Log4pot service
log4pot:
container_name: log4pot
restart: always
depends_on:
logstash:
condition: service_healthy
tmpfs:
- /tmp:uid=2000,gid=2000
networks:
- log4pot_local
ports:
# - "80:8080"
# - "443:8080"
- "8080:8080"
# - "9200:8080"
- "25565:8080"
image: ${TPOT_REPO}/log4pot:${TPOT_VERSION}
pull_policy: ${TPOT_PULL_POLICY}
read_only: true
volumes:
- ${TPOT_DATA_PATH}/log4pot/log:/var/log/log4pot/log
- ${TPOT_DATA_PATH}/log4pot/payloads:/var/log/log4pot/payloads
# Medpot service
medpot:
container_name: medpot
restart: always
depends_on:
logstash:
condition: service_healthy
networks:
- medpot_local
ports:
- "2575:2575"
image: ${TPOT_REPO}/medpot:${TPOT_VERSION}
pull_policy: ${TPOT_PULL_POLICY}
read_only: true
volumes:
- ${TPOT_DATA_PATH}/medpot/log/:/var/log/medpot
# Redishoneypot service
redishoneypot:
container_name: redishoneypot
restart: always
depends_on:
logstash:
condition: service_healthy
networks:
- redishoneypot_local
ports:
- "6379:6379"
image: ${TPOT_REPO}/redishoneypot:${TPOT_VERSION}
pull_policy: ${TPOT_PULL_POLICY}
read_only: true
volumes:
- ${TPOT_DATA_PATH}/redishoneypot/log:/var/log/redishoneypot
# SentryPeer service
sentrypeer:
container_name: sentrypeer
restart: always
depends_on:
logstash:
condition: service_healthy
# environment:
# - SENTRYPEER_PEER_TO_PEER=1
networks:
- sentrypeer_local
ports:
# - "4222:4222/udp"
- "5060:5060/tcp"
- "5060:5060/udp"
# - "127.0.0.1:8082:8082"
image: ${TPOT_REPO}/sentrypeer:${TPOT_VERSION}
pull_policy: ${TPOT_PULL_POLICY}
read_only: true
volumes:
- ${TPOT_DATA_PATH}/sentrypeer/log:/var/log/sentrypeer
#### Snare / Tanner
## Tanner Redis Service
tanner_redis:
container_name: tanner_redis
restart: always
depends_on:
logstash:
condition: service_healthy
tty: true
networks:
- tanner_local
image: ${TPOT_REPO}/redis:${TPOT_VERSION}
pull_policy: ${TPOT_PULL_POLICY}
read_only: true
## PHP Sandbox service
tanner_phpox:
container_name: tanner_phpox
restart: always
depends_on:
logstash:
condition: service_healthy
tty: true
networks:
- tanner_local
image: ${TPOT_REPO}/phpox:${TPOT_VERSION}
pull_policy: ${TPOT_PULL_POLICY}
read_only: true
## Tanner API Service
tanner_api:
container_name: tanner_api
restart: always
depends_on:
- tanner_redis
tmpfs:
- /tmp/tanner:uid=2000,gid=2000
tty: true
networks:
- tanner_local
image: ${TPOT_REPO}/tanner:${TPOT_VERSION}
pull_policy: ${TPOT_PULL_POLICY}
read_only: true
volumes:
- ${TPOT_DATA_PATH}/tanner/log:/var/log/tanner
command: tannerapi
## Tanner Service
tanner:
container_name: tanner
restart: always
depends_on:
- tanner_api
- tanner_phpox
tmpfs:
- /tmp/tanner:uid=2000,gid=2000
tty: true
networks:
- tanner_local
image: ${TPOT_REPO}/tanner:${TPOT_VERSION}
pull_policy: ${TPOT_PULL_POLICY}
command: tanner
read_only: true
volumes:
- ${TPOT_DATA_PATH}/tanner/log:/var/log/tanner
- ${TPOT_DATA_PATH}/tanner/files:/opt/tanner/files
## Snare Service
snare:
container_name: snare
restart: always
depends_on:
- tanner
tty: true
networks:
- tanner_local
ports:
- "80:80"
image: ${TPOT_REPO}/snare:${TPOT_VERSION}
pull_policy: ${TPOT_PULL_POLICY}
# Wordpot service
wordpot:
container_name: wordpot
restart: always
depends_on:
tpotinit:
condition: service_healthy
networks:
- wordpot_local
ports:
- "82:80"
image: ${TPOT_REPO}/wordpot:${TPOT_VERSION}
read_only: true
volumes:
- ${TPOT_DATA_PATH}/wordpot/log:/opt/wordpot/logs/
##################
#### Tools
##################
#### ELK
## Elasticsearch service
elasticsearch:
container_name: elasticsearch
restart: always
depends_on:
tpotinit:
condition: service_healthy
environment:
- bootstrap.memory_lock=true
- ES_JAVA_OPTS=-Xms2048m -Xmx2048m
- ES_TMPDIR=/tmp
cap_add:
- IPC_LOCK
ulimits:
memlock:
soft: -1
hard: -1
nofile:
soft: 65536
hard: 65536
mem_limit: 4g
ports:
- "127.0.0.1:64298:9200"
image: ${TPOT_REPO}/elasticsearch:${TPOT_VERSION}
pull_policy: ${TPOT_PULL_POLICY}
volumes:
- ${TPOT_DATA_PATH}:/data
## Logstash service
logstash:
container_name: logstash
restart: always
depends_on:
elasticsearch:
condition: service_healthy
environment:
- LS_JAVA_OPTS=-Xms1024m -Xmx1024m
- TPOT_TYPE=${TPOT_TYPE:-HIVE}
- TPOT_HIVE_USER=${TPOT_HIVE_USER}
- TPOT_HIVE_IP=${TPOT_HIVE_IP}
ports:
- "127.0.0.1:64305:64305"
mem_limit: 2g
image: ${TPOT_REPO}/logstash:${TPOT_VERSION}
pull_policy: ${TPOT_PULL_POLICY}
volumes:
- ${TPOT_DATA_PATH}:/data
#### /ELK
# Ewsposter service
ewsposter:
container_name: ewsposter
restart: always
depends_on:
logstash:
condition: service_healthy
networks:
- ewsposter_local
environment:
- EWS_HPFEEDS_ENABLE=false
- EWS_HPFEEDS_HOST=host
- EWS_HPFEEDS_PORT=port
- EWS_HPFEEDS_CHANNELS=channels
- EWS_HPFEEDS_IDENT=user
- EWS_HPFEEDS_SECRET=secret
- EWS_HPFEEDS_TLSCERT=false
- EWS_HPFEEDS_FORMAT=json
image: ${TPOT_REPO}/ewsposter:${TPOT_VERSION}
pull_policy: ${TPOT_PULL_POLICY}
volumes:
- ${TPOT_DATA_PATH}:/data
- ${TPOT_DATA_PATH}/ews/conf/ews.ip:/opt/ewsposter/ews.ip

View file

@ -1,22 +1,23 @@
# T-Pot: SENSOR
version: '3.9'
networks:
adbhoney_local:
ciscoasa_local:
citrixhoneypot_local:
conpot_local_IEC104:
conpot_local_guardian_ast:
conpot_local_ipmi:
conpot_local_kamstrup_382:
cowrie_local:
ddospot_local:
dicompot_local:
dionaea_local:
elasticpot_local:
h0neytr4p_local:
heralding_local:
honeyaml_local:
ipphoney_local:
mailoney_local:
medpot_local:
miniprint_local:
redishoneypot_local:
sentrypeer_local:
tanner_local:
@ -94,6 +95,23 @@ services:
volumes:
- ${TPOT_DATA_PATH}/ciscoasa/log:/var/log/ciscoasa
# CitrixHoneypot service
citrixhoneypot:
container_name: citrixhoneypot
restart: always
depends_on:
tpotinit:
condition: service_healthy
networks:
- citrixhoneypot_local
ports:
- "443:443"
image: ${TPOT_REPO}/citrixhoneypot:${TPOT_VERSION}
pull_policy: ${TPOT_PULL_POLICY}
read_only: true
volumes:
- ${TPOT_DATA_PATH}/citrixhoneypot/log:/opt/citrixhoneypot/logs
# Conpot IEC104 service
conpot_IEC104:
container_name: conpot_iec104
@ -220,6 +238,29 @@ services:
- ${TPOT_DATA_PATH}/cowrie/log:/home/cowrie/cowrie/log
- ${TPOT_DATA_PATH}/cowrie/log/tty:/home/cowrie/cowrie/log/tty
# Ddospot service
ddospot:
container_name: ddospot
restart: always
depends_on:
tpotinit:
condition: service_healthy
networks:
- ddospot_local
ports:
- "19:19/udp"
- "53:53/udp"
- "123:123/udp"
# - "161:161/udp"
- "1900:1900/udp"
image: ${TPOT_REPO}/ddospot:${TPOT_VERSION}
pull_policy: ${TPOT_PULL_POLICY}
read_only: true
volumes:
- ${TPOT_DATA_PATH}/ddospot/log:/opt/ddospot/ddospot/logs
- ${TPOT_DATA_PATH}/ddospot/bl:/opt/ddospot/ddospot/bl
- ${TPOT_DATA_PATH}/ddospot/db:/opt/ddospot/ddospot/db
# Dicompot service
# Get the Horos Client for testing: https://horosproject.org/
# Get Dicom images (CC BY 3.0): https://www.cancerimagingarchive.net/collections/
@ -233,7 +274,6 @@ services:
networks:
- dicompot_local
ports:
- "104:11112"
- "11112:11112"
image: ${TPOT_REPO}/dicompot:${TPOT_VERSION}
pull_policy: ${TPOT_PULL_POLICY}
@ -300,25 +340,6 @@ services:
volumes:
- ${TPOT_DATA_PATH}/elasticpot/log:/opt/elasticpot/log
# H0neytr4p service
h0neytr4p:
container_name: h0neytr4p
restart: always
depends_on:
tpotinit:
condition: service_healthy
networks:
- h0neytr4p_local
ports:
- "443:443"
# - "80:80"
image: ${TPOT_REPO}/h0neytr4p:${TPOT_VERSION}
pull_policy: ${TPOT_PULL_POLICY}
read_only: true
volumes:
- ${TPOT_DATA_PATH}/h0neytr4p/log/:/opt/h0neytr4p/log/
- ${TPOT_DATA_PATH}/h0neytr4p/payloads/:/data/h0neytr4p/payloads/
# Heralding service
heralding:
container_name: heralding
@ -353,23 +374,6 @@ services:
volumes:
- ${TPOT_DATA_PATH}/heralding/log:/var/log/heralding
# Honeyaml service
honeyaml:
container_name: honeyaml
restart: always
depends_on:
tpotinit:
condition: service_healthy
networks:
- honeyaml_local
ports:
- "3000:8080"
image: ${TPOT_REPO}/honeyaml:${TPOT_VERSION}
pull_policy: ${TPOT_PULL_POLICY}
read_only: true
volumes:
- ${TPOT_DATA_PATH}/honeyaml/log:/opt/honeyaml/log/
# Honeytrap service
honeytrap:
container_name: honeytrap
@ -410,12 +414,16 @@ services:
# Mailoney service
mailoney:
container_name: mailoney
stdin_open: true
tty: true
restart: always
depends_on:
tpotinit:
condition: service_healthy
environment:
- HPFEEDS_SERVER=
- HPFEEDS_IDENT=user
- HPFEEDS_SECRET=pass
- HPFEEDS_PORT=20000
- HPFEEDS_CHANNELPREFIX=prefix
networks:
- mailoney_local
ports:
@ -444,24 +452,6 @@ services:
volumes:
- ${TPOT_DATA_PATH}/medpot/log/:/var/log/medpot
# Miniprint service
miniprint:
container_name: miniprint
restart: always
depends_on:
tpotinit:
condition: service_healthy
networks:
- miniprint_local
ports:
- "9100:9100"
image: ${TPOT_REPO}/miniprint:${TPOT_VERSION}
pull_policy: ${TPOT_PULL_POLICY}
read_only: true
volumes:
- ${TPOT_DATA_PATH}/miniprint/log/:/opt/miniprint/log/
- ${TPOT_DATA_PATH}/miniprint/uploads/:/opt/miniprint/uploads/
# Redishoneypot service
redishoneypot:
container_name: redishoneypot
@ -594,7 +584,6 @@ services:
ports:
- "8080:80"
image: ${TPOT_REPO}/wordpot:${TPOT_VERSION}
pull_policy: ${TPOT_PULL_POLICY}
read_only: true
volumes:
- ${TPOT_DATA_PATH}/wordpot/log:/opt/wordpot/logs/
@ -675,7 +664,6 @@ services:
- TPOT_TYPE=${TPOT_TYPE:-HIVE}
- TPOT_HIVE_USER=${TPOT_HIVE_USER}
- TPOT_HIVE_IP=${TPOT_HIVE_IP}
- LS_SSL_VERIFICATION=${LS_SSL_VERIFICATION:-full}
ports:
- "127.0.0.1:64305:64305"
mem_limit: 2g

View file

@ -1,27 +1,28 @@
# T-Pot: STANDARD
version: '3.9'
networks:
adbhoney_local:
ciscoasa_local:
citrixhoneypot_local:
conpot_local_IEC104:
conpot_local_guardian_ast:
conpot_local_ipmi:
conpot_local_kamstrup_382:
cowrie_local:
ddospot_local:
dicompot_local:
dionaea_local:
elasticpot_local:
h0neytr4p_local:
heralding_local:
honeyaml_local:
ipphoney_local:
mailoney_local:
medpot_local:
miniprint_local:
redishoneypot_local:
sentrypeer_local:
tanner_local:
spiderfoot_local:
wordpot_local:
nginx_local:
ewsposter_local:
services:
@ -96,6 +97,23 @@ services:
volumes:
- ${TPOT_DATA_PATH}/ciscoasa/log:/var/log/ciscoasa
# CitrixHoneypot service
citrixhoneypot:
container_name: citrixhoneypot
restart: always
depends_on:
tpotinit:
condition: service_healthy
networks:
- citrixhoneypot_local
ports:
- "443:443"
image: ${TPOT_REPO}/citrixhoneypot:${TPOT_VERSION}
pull_policy: ${TPOT_PULL_POLICY}
read_only: true
volumes:
- ${TPOT_DATA_PATH}/citrixhoneypot/log:/opt/citrixhoneypot/logs
# Conpot IEC104 service
conpot_IEC104:
container_name: conpot_iec104
@ -222,6 +240,29 @@ services:
- ${TPOT_DATA_PATH}/cowrie/log:/home/cowrie/cowrie/log
- ${TPOT_DATA_PATH}/cowrie/log/tty:/home/cowrie/cowrie/log/tty
# Ddospot service
ddospot:
container_name: ddospot
restart: always
depends_on:
tpotinit:
condition: service_healthy
networks:
- ddospot_local
ports:
- "19:19/udp"
- "53:53/udp"
- "123:123/udp"
# - "161:161/udp"
- "1900:1900/udp"
image: ${TPOT_REPO}/ddospot:${TPOT_VERSION}
pull_policy: ${TPOT_PULL_POLICY}
read_only: true
volumes:
- ${TPOT_DATA_PATH}/ddospot/log:/opt/ddospot/ddospot/logs
- ${TPOT_DATA_PATH}/ddospot/bl:/opt/ddospot/ddospot/bl
- ${TPOT_DATA_PATH}/ddospot/db:/opt/ddospot/ddospot/db
# Dicompot service
# Get the Horos Client for testing: https://horosproject.org/
# Get Dicom images (CC BY 3.0): https://www.cancerimagingarchive.net/collections/
@ -235,7 +276,6 @@ services:
networks:
- dicompot_local
ports:
- "104:11112"
- "11112:11112"
image: ${TPOT_REPO}/dicompot:${TPOT_VERSION}
pull_policy: ${TPOT_PULL_POLICY}
@ -302,25 +342,6 @@ services:
volumes:
- ${TPOT_DATA_PATH}/elasticpot/log:/opt/elasticpot/log
# H0neytr4p service
h0neytr4p:
container_name: h0neytr4p
restart: always
depends_on:
tpotinit:
condition: service_healthy
networks:
- h0neytr4p_local
ports:
- "443:443"
# - "80:80"
image: ${TPOT_REPO}/h0neytr4p:${TPOT_VERSION}
pull_policy: ${TPOT_PULL_POLICY}
read_only: true
volumes:
- ${TPOT_DATA_PATH}/h0neytr4p/log/:/opt/h0neytr4p/log/
- ${TPOT_DATA_PATH}/h0neytr4p/payloads/:/data/h0neytr4p/payloads/
# Heralding service
heralding:
container_name: heralding
@ -355,23 +376,6 @@ services:
volumes:
- ${TPOT_DATA_PATH}/heralding/log:/var/log/heralding
# Honeyaml service
honeyaml:
container_name: honeyaml
restart: always
depends_on:
tpotinit:
condition: service_healthy
networks:
- honeyaml_local
ports:
- "3000:8080"
image: ${TPOT_REPO}/honeyaml:${TPOT_VERSION}
pull_policy: ${TPOT_PULL_POLICY}
read_only: true
volumes:
- ${TPOT_DATA_PATH}/honeyaml/log:/opt/honeyaml/log/
# Honeytrap service
honeytrap:
container_name: honeytrap
@ -412,12 +416,16 @@ services:
# Mailoney service
mailoney:
container_name: mailoney
stdin_open: true
tty: true
restart: always
depends_on:
tpotinit:
condition: service_healthy
environment:
- HPFEEDS_SERVER=
- HPFEEDS_IDENT=user
- HPFEEDS_SECRET=pass
- HPFEEDS_PORT=20000
- HPFEEDS_CHANNELPREFIX=prefix
networks:
- mailoney_local
ports:
@ -446,24 +454,6 @@ services:
volumes:
- ${TPOT_DATA_PATH}/medpot/log/:/var/log/medpot
# Miniprint service
miniprint:
container_name: miniprint
restart: always
depends_on:
tpotinit:
condition: service_healthy
networks:
- miniprint_local
ports:
- "9100:9100"
image: ${TPOT_REPO}/miniprint:${TPOT_VERSION}
pull_policy: ${TPOT_PULL_POLICY}
read_only: true
volumes:
- ${TPOT_DATA_PATH}/miniprint/log/:/opt/miniprint/log/
- ${TPOT_DATA_PATH}/miniprint/uploads/:/opt/miniprint/uploads/
# Redishoneypot service
redishoneypot:
container_name: redishoneypot
@ -596,7 +586,6 @@ services:
ports:
- "8080:80"
image: ${TPOT_REPO}/wordpot:${TPOT_VERSION}
pull_policy: ${TPOT_PULL_POLICY}
read_only: true
volumes:
- ${TPOT_DATA_PATH}/wordpot/log:/opt/wordpot/logs/
@ -671,8 +660,6 @@ services:
depends_on:
tpotinit:
condition: service_healthy
networks:
- nginx_local
environment:
- bootstrap.memory_lock=true
- ES_JAVA_OPTS=-Xms2048m -Xmx2048m
@ -701,8 +688,6 @@ services:
depends_on:
elasticsearch:
condition: service_healthy
networks:
- nginx_local
mem_limit: 1g
ports:
- "127.0.0.1:64296:5601"
@ -716,14 +701,11 @@ services:
depends_on:
elasticsearch:
condition: service_healthy
networks:
- nginx_local
environment:
- LS_JAVA_OPTS=-Xms1024m -Xmx1024m
- TPOT_TYPE=${TPOT_TYPE:-HIVE}
- TPOT_HIVE_USER=${TPOT_HIVE_USER}
- TPOT_HIVE_IP=${TPOT_HIVE_IP}
- LS_SSL_VERIFICATION=${LS_SSL_VERIFICATION:-full}
ports:
- "127.0.0.1:64305:64305"
mem_limit: 2g
@ -739,8 +721,6 @@ services:
depends_on:
tpotinit:
condition: service_healthy
networks:
- nginx_local
stop_signal: SIGKILL
tty: true
image: ${TPOT_REPO}/redis:${TPOT_VERSION}
@ -754,8 +734,6 @@ services:
depends_on:
tpotinit:
condition: service_healthy
networks:
- nginx_local
environment:
- MAP_COMMAND=AttackMapServer.py
stop_signal: SIGKILL
@ -772,8 +750,6 @@ services:
depends_on:
elasticsearch:
condition: service_healthy
networks:
- nginx_local
environment:
- MAP_COMMAND=DataServer_v2.py
- TPOT_ATTACKMAP_TEXT=${TPOT_ATTACKMAP_TEXT}
@ -825,11 +801,9 @@ services:
- /var/tmp/nginx/scgi
- /run
- /var/lib/nginx/tmp:uid=100,gid=82
networks:
- nginx_local
network_mode: "host"
ports:
- "64297:64297"
- "64294:64294"
image: ${TPOT_REPO}/nginx:${TPOT_VERSION}
pull_policy: ${TPOT_PULL_POLICY}
read_only: true
@ -847,7 +821,7 @@ services:
tpotinit:
condition: service_healthy
networks:
- nginx_local
- spiderfoot_local
ports:
- "127.0.0.1:64303:8080"
image: ${TPOT_REPO}/spiderfoot:${TPOT_VERSION}

View file

@ -1,406 +0,0 @@
# T-Pot: TARPIT
networks:
ddospot_local:
endlessh_local:
go-pot_local:
hellpot_local:
heralding_local:
nginx_local:
ewsposter_local:
services:
#########################################
#### DEV
#########################################
#### T-Pot Init - Never delete this!
#########################################
# T-Pot Init Service
tpotinit:
container_name: tpotinit
env_file:
- .env
restart: always
stop_grace_period: 60s
tmpfs:
- /tmp/etc:uid=2000,gid=2000
- /tmp/:uid=2000,gid=2000
network_mode: "host"
cap_add:
- NET_ADMIN
image: ${TPOT_REPO}/tpotinit:${TPOT_VERSION}
pull_policy: ${TPOT_PULL_POLICY}
volumes:
- ${TPOT_DOCKER_COMPOSE}:/tmp/tpot/docker-compose.yml:ro
- ${TPOT_DATA_PATH}/blackhole:/etc/blackhole
- ${TPOT_DATA_PATH}:/data
- /var/run/docker.sock:/var/run/docker.sock:ro
##################
#### Honeypots
##################
# Ddospot service
ddospot:
container_name: ddospot
restart: always
depends_on:
tpotinit:
condition: service_healthy
networks:
- ddospot_local
ports:
- "19:19/udp"
- "53:53/udp"
- "123:123/udp"
# - "161:161/udp"
- "1900:1900/udp"
image: ${TPOT_REPO}/ddospot:${TPOT_VERSION}
pull_policy: ${TPOT_PULL_POLICY}
read_only: true
volumes:
- ${TPOT_DATA_PATH}/ddospot/log:/opt/ddospot/ddospot/logs
- ${TPOT_DATA_PATH}/ddospot/bl:/opt/ddospot/ddospot/bl
- ${TPOT_DATA_PATH}/ddospot/db:/opt/ddospot/ddospot/db
# Endlessh service
endlessh:
container_name: endlessh
restart: always
depends_on:
tpotinit:
condition: service_healthy
networks:
- endlessh_local
ports:
- "22:2222"
image: ${TPOT_REPO}/endlessh:${TPOT_VERSION}
pull_policy: ${TPOT_PULL_POLICY}
read_only: true
volumes:
- ${TPOT_DATA_PATH}/endlessh/log:/var/log/endlessh
# Go-pot service
go-pot:
container_name: go-pot
restart: always
depends_on:
tpotinit:
condition: service_healthy
networks:
- go-pot_local
ports:
- "8080:8080"
image: ${TPOT_REPO}/go-pot:${TPOT_VERSION}
pull_policy: ${TPOT_PULL_POLICY}
read_only: true
volumes:
- ${TPOT_DATA_PATH}/go-pot/log:/opt/go-pot/log/
# Hellpot service
hellpot:
container_name: hellpot
restart: always
depends_on:
tpotinit:
condition: service_healthy
networks:
- hellpot_local
ports:
- "80:8080"
image: ${TPOT_REPO}/hellpot:${TPOT_VERSION}
pull_policy: ${TPOT_PULL_POLICY}
read_only: true
volumes:
- ${TPOT_DATA_PATH}/hellpot/log:/var/log/hellpot
# Heralding service
heralding:
container_name: heralding
restart: always
depends_on:
tpotinit:
condition: service_healthy
tmpfs:
- /tmp/heralding:uid=2000,gid=2000
networks:
- heralding_local
ports:
- "21:21"
# - "22:22"
- "23:23"
- "25:25"
# - "80:80"
- "110:110"
- "143:143"
- "443:443"
- "465:465"
- "993:993"
- "995:995"
- "3306:3306"
- "3389:3389"
- "1080:1080"
- "5432:5432"
- "5900:5900"
image: ${TPOT_REPO}/heralding:${TPOT_VERSION}
pull_policy: ${TPOT_PULL_POLICY}
read_only: true
volumes:
- ${TPOT_DATA_PATH}/heralding/log:/var/log/heralding
##################
#### NSM
##################
# Fatt service
fatt:
container_name: fatt
restart: always
depends_on:
tpotinit:
condition: service_healthy
network_mode: "host"
cap_add:
- NET_ADMIN
- SYS_NICE
- NET_RAW
image: ${TPOT_REPO}/fatt:${TPOT_VERSION}
pull_policy: ${TPOT_PULL_POLICY}
volumes:
- ${TPOT_DATA_PATH}/fatt/log:/opt/fatt/log
# P0f service
p0f:
container_name: p0f
restart: always
depends_on:
tpotinit:
condition: service_healthy
network_mode: "host"
image: ${TPOT_REPO}/p0f:${TPOT_VERSION}
pull_policy: ${TPOT_PULL_POLICY}
read_only: true
volumes:
- ${TPOT_DATA_PATH}/p0f/log:/var/log/p0f
# Suricata service
suricata:
container_name: suricata
restart: always
depends_on:
tpotinit:
condition: service_healthy
environment:
- OINKCODE=${OINKCODE:-OPEN} # Default to OPEN if unset or NULL (value provided by T-Pot .env)
# Optionally load external rules from one or more URLs (separate multiple URLs with "|"), e.g.:
# - FROMURL="https://username:password@yoururl.com|https://username:password@otherurl.com"
network_mode: "host"
cap_add:
- NET_ADMIN
- SYS_NICE
- NET_RAW
image: ${TPOT_REPO}/suricata:${TPOT_VERSION}
pull_policy: ${TPOT_PULL_POLICY}
volumes:
- ${TPOT_DATA_PATH}/suricata/log:/var/log/suricata
##################
#### Tools
##################
#### ELK
## Elasticsearch service
elasticsearch:
container_name: elasticsearch
restart: always
depends_on:
tpotinit:
condition: service_healthy
networks:
- nginx_local
environment:
- bootstrap.memory_lock=true
- ES_JAVA_OPTS=-Xms2048m -Xmx2048m
- ES_TMPDIR=/tmp
cap_add:
- IPC_LOCK
ulimits:
memlock:
soft: -1
hard: -1
nofile:
soft: 65536
hard: 65536
mem_limit: 4g
ports:
- "127.0.0.1:64298:9200"
image: ${TPOT_REPO}/elasticsearch:${TPOT_VERSION}
pull_policy: ${TPOT_PULL_POLICY}
volumes:
- ${TPOT_DATA_PATH}:/data
## Kibana service
kibana:
container_name: kibana
restart: always
depends_on:
elasticsearch:
condition: service_healthy
networks:
- nginx_local
mem_limit: 1g
ports:
- "127.0.0.1:64296:5601"
image: ${TPOT_REPO}/kibana:${TPOT_VERSION}
pull_policy: ${TPOT_PULL_POLICY}
## Logstash service
logstash:
container_name: logstash
restart: always
depends_on:
elasticsearch:
condition: service_healthy
networks:
- nginx_local
environment:
- LS_JAVA_OPTS=-Xms1024m -Xmx1024m
- TPOT_TYPE=${TPOT_TYPE:-HIVE}
- TPOT_HIVE_USER=${TPOT_HIVE_USER}
- TPOT_HIVE_IP=${TPOT_HIVE_IP}
- LS_SSL_VERIFICATION=${LS_SSL_VERIFICATION:-full}
ports:
- "127.0.0.1:64305:64305"
mem_limit: 2g
image: ${TPOT_REPO}/logstash:${TPOT_VERSION}
pull_policy: ${TPOT_PULL_POLICY}
volumes:
- ${TPOT_DATA_PATH}:/data
## Map Redis Service
map_redis:
container_name: map_redis
restart: always
depends_on:
tpotinit:
condition: service_healthy
networks:
- nginx_local
stop_signal: SIGKILL
tty: true
image: ${TPOT_REPO}/redis:${TPOT_VERSION}
pull_policy: ${TPOT_PULL_POLICY}
read_only: true
## Map Web Service
map_web:
container_name: map_web
restart: always
depends_on:
tpotinit:
condition: service_healthy
networks:
- nginx_local
environment:
- MAP_COMMAND=AttackMapServer.py
stop_signal: SIGKILL
tty: true
ports:
- "127.0.0.1:64299:64299"
image: ${TPOT_REPO}/map:${TPOT_VERSION}
pull_policy: ${TPOT_PULL_POLICY}
## Map Data Service
map_data:
container_name: map_data
restart: always
depends_on:
elasticsearch:
condition: service_healthy
networks:
- nginx_local
environment:
- MAP_COMMAND=DataServer_v2.py
- TPOT_ATTACKMAP_TEXT=${TPOT_ATTACKMAP_TEXT}
- TZ=${TPOT_ATTACKMAP_TEXT_TIMEZONE}
stop_signal: SIGKILL
tty: true
image: ${TPOT_REPO}/map:${TPOT_VERSION}
pull_policy: ${TPOT_PULL_POLICY}
#### /ELK
# Ewsposter service
ewsposter:
container_name: ewsposter
restart: always
depends_on:
tpotinit:
condition: service_healthy
networks:
- ewsposter_local
environment:
- EWS_HPFEEDS_ENABLE=false
- EWS_HPFEEDS_HOST=host
- EWS_HPFEEDS_PORT=port
- EWS_HPFEEDS_CHANNELS=channels
- EWS_HPFEEDS_IDENT=user
- EWS_HPFEEDS_SECRET=secret
- EWS_HPFEEDS_TLSCERT=false
- EWS_HPFEEDS_FORMAT=json
image: ${TPOT_REPO}/ewsposter:${TPOT_VERSION}
pull_policy: ${TPOT_PULL_POLICY}
volumes:
- ${TPOT_DATA_PATH}:/data
- ${TPOT_DATA_PATH}/ews/conf/ews.ip:/opt/ewsposter/ews.ip
# Nginx service
nginx:
container_name: nginx
restart: always
environment:
- TPOT_OSTYPE=${TPOT_OSTYPE}
depends_on:
tpotinit:
condition: service_healthy
tmpfs:
- /var/tmp/nginx/client_body
- /var/tmp/nginx/proxy
- /var/tmp/nginx/fastcgi
- /var/tmp/nginx/uwsgi
- /var/tmp/nginx/scgi
- /run
- /var/lib/nginx/tmp:uid=100,gid=82
networks:
- nginx_local
ports:
- "64297:64297"
- "64294:64294"
image: ${TPOT_REPO}/nginx:${TPOT_VERSION}
pull_policy: ${TPOT_PULL_POLICY}
read_only: true
volumes:
- ${TPOT_DATA_PATH}/nginx/cert/:/etc/nginx/cert/:ro
- ${TPOT_DATA_PATH}/nginx/conf/nginxpasswd:/etc/nginx/nginxpasswd:ro
- ${TPOT_DATA_PATH}/nginx/conf/lswebpasswd:/etc/nginx/lswebpasswd:ro
- ${TPOT_DATA_PATH}/nginx/log/:/var/log/nginx/
# Spiderfoot service
spiderfoot:
container_name: spiderfoot
restart: always
depends_on:
tpotinit:
condition: service_healthy
networks:
- nginx_local
ports:
- "127.0.0.1:64303:8080"
image: ${TPOT_REPO}/spiderfoot:${TPOT_VERSION}
pull_policy: ${TPOT_PULL_POLICY}
volumes:
- ${TPOT_DATA_PATH}/spiderfoot:/home/spiderfoot/.spiderfoot

View file

@ -4,7 +4,6 @@
networks:
adbhoney_local:
beelzebub_local:
ciscoasa_local:
citrixhoneypot_local:
conpot_local_IEC104:
@ -17,23 +16,18 @@ networks:
dionaea_local:
elasticpot_local:
endlessh_local:
galah_local:
go-pot_local:
h0neytr4p_local:
hellpot_local:
heralding_local:
honeyaml_local:
honeypots_local:
ipphoney_local:
log4pot_local:
mailoney_local:
medpot_local:
miniprint_local:
redishoneypot_local:
sentrypeer_local:
tanner_local:
wordpot_local:
nginx_local:
spiderfoot_local:
ewsposter_local:
services:
@ -87,34 +81,6 @@ services:
- ${TPOT_DATA_PATH}/adbhoney/log:/opt/adbhoney/log
- ${TPOT_DATA_PATH}/adbhoney/downloads:/opt/adbhoney/dl
# Beelzebub service
beelzebub:
container_name: beelzebub
restart: always
depends_on:
tpotinit:
condition: service_healthy
# cpu_count: 1
# cpus: 0.25
networks:
- beelzebub_local
ports:
- "22:22"
# - "80:80"
# - "2222:2222"
# - "3306:3306"
# - "8080:8080"
image: ${TPOT_REPO}/beelzebub:${TPOT_VERSION}
pull_policy: ${TPOT_PULL_POLICY}
environment:
LLM_MODEL: ${BEELZEBUB_LLM_MODEL}
LLM_HOST: ${BEELZEBUB_LLM_HOST}
OLLAMA_MODEL: ${BEELZEBUB_OLLAMA_MODEL}
read_only: true
volumes:
- ${TPOT_DATA_PATH}/beelzebub/key:/opt/beelzebub/configurations/key
- ${TPOT_DATA_PATH}/beelzebub/log:/opt/beelzebub/configurations/log
# Ciscoasa service
ciscoasa:
container_name: ciscoasa
@ -314,7 +280,6 @@ services:
networks:
- dicompot_local
ports:
- "104:11112"
- "11112:11112"
image: ${TPOT_REPO}/dicompot:${TPOT_VERSION}
pull_policy: ${TPOT_PULL_POLICY}
@ -398,93 +363,25 @@ services:
volumes:
- ${TPOT_DATA_PATH}/endlessh/log:/var/log/endlessh
# Galah service
galah:
container_name: galah
restart: always
depends_on:
tpotinit:
condition: service_healthy
# cpu_count: 1
# cpus: 0.25
networks:
- galah_local
ports:
- "80:80"
- "443:443"
- "8443:8443"
- "8080:8080"
image: ${TPOT_REPO}/galah:${TPOT_VERSION}
pull_policy: ${TPOT_PULL_POLICY}
environment:
LLM_PROVIDER: ${GALAH_LLM_PROVIDER}
LLM_SERVER_URL: ${GALAH_LLM_SERVER_URL}
LLM_MODEL: ${GALAH_LLM_MODEL}
# LLM_TEMPERATURE: ${GALAH_LLM_TEMPERATURE}
# LLM_API_KEY: ${GALAH_LLM_API_KEY}
# LLM_CLOUD_LOCATION: ${GALAH_LLM_CLOUD_LOCATION}
# LLM_CLOUD_PROJECT: ${GALAH_LLM_CLOUD_PROJECT}
read_only: true
volumes:
- ${TPOT_DATA_PATH}/galah/cache:/opt/galah/config/cache
- ${TPOT_DATA_PATH}/galah/cert:/opt/galah/config/cert
- ${TPOT_DATA_PATH}/galah/log:/opt/galah/log
# Glutton service
glutton:
container_name: glutton
restart: always
depends_on:
tpotinit:
condition: service_healthy
tmpfs:
- /var/lib/glutton:uid=2000,gid=2000
- /run:uid=2000,gid=2000
network_mode: "host"
cap_add:
- NET_ADMIN
image: ${TPOT_REPO}/glutton:${TPOT_VERSION}
pull_policy: ${TPOT_PULL_POLICY}
read_only: true
volumes:
- ${TPOT_DATA_PATH}/glutton/log:/var/log/glutton
- ${TPOT_DATA_PATH}/glutton/payloads:/opt/glutton/payloads
# Go-pot service
go-pot:
container_name: go-pot
restart: always
depends_on:
tpotinit:
condition: service_healthy
networks:
- go-pot_local
ports:
- "8080:8080"
image: ${TPOT_REPO}/go-pot:${TPOT_VERSION}
pull_policy: ${TPOT_PULL_POLICY}
read_only: true
volumes:
- ${TPOT_DATA_PATH}/go-pot/log:/opt/go-pot/log/
# H0neytr4p service
h0neytr4p:
container_name: h0neytr4p
restart: always
depends_on:
tpotinit:
condition: service_healthy
networks:
- h0neytr4p_local
ports:
- "443:443"
# - "80:80"
image: ${TPOT_REPO}/h0neytr4p:${TPOT_VERSION}
pull_policy: ${TPOT_PULL_POLICY}
read_only: true
volumes:
- ${TPOT_DATA_PATH}/h0neytr4p/log/:/opt/h0neytr4p/log/
- ${TPOT_DATA_PATH}/h0neytr4p/payloads/:/data/h0neytr4p/payloads/
# # Glutton service
# glutton:
# container_name: glutton
# restart: always
# depends_on:
# tpotinit:
# condition: service_healthy
# tmpfs:
# - /var/lib/glutton:uid=2000,gid=2000
# - /run:uid=2000,gid=2000
# network_mode: "host"
# cap_add:
# - NET_ADMIN
# image: ${TPOT_REPO}/glutton:${TPOT_VERSION}
# pull_policy: ${TPOT_PULL_POLICY}
# read_only: true
# volumes:
# - ${TPOT_DATA_PATH}/glutton/log:/var/log/glutton
# - ${TPOT_DATA_PATH}/glutton/payloads:/opt/glutton/payloads
# Hellpot service
hellpot:
@ -515,19 +412,19 @@ services:
networks:
- heralding_local
ports:
- "21:21"
- "22:22"
- "23:23"
- "25:25"
- "80:80"
# - "21:21"
# - "22:22"
# - "23:23"
# - "25:25"
# - "80:80"
- "110:110"
- "143:143"
- "443:443"
# - "443:443"
- "465:465"
- "993:993"
- "995:995"
- "3306:3306"
- "3389:3389"
# - "3306:3306"
# - "3389:3389"
- "1080:1080"
- "5432:5432"
- "5900:5900"
@ -537,23 +434,6 @@ services:
volumes:
- ${TPOT_DATA_PATH}/heralding/log:/var/log/heralding
# Honeyaml service
honeyaml:
container_name: honeyaml
restart: always
depends_on:
tpotinit:
condition: service_healthy
networks:
- honeyaml_local
ports:
- "8080:8080"
image: ${TPOT_REPO}/honeyaml:${TPOT_VERSION}
pull_policy: ${TPOT_PULL_POLICY}
read_only: true
volumes:
- ${TPOT_DATA_PATH}/honeyaml/log:/opt/honeyaml/log/
# Honeypots service
honeypots:
container_name: honeypots
@ -572,8 +452,7 @@ services:
- "22:22"
- "23:23"
- "25:25"
- "53:53"
- "67:67/udp"
- "53:53/udp"
- "80:80"
- "110:110"
- "123:123"
@ -667,12 +546,16 @@ services:
# Mailoney service
mailoney:
container_name: mailoney
stdin_open: true
tty: true
restart: always
depends_on:
tpotinit:
condition: service_healthy
environment:
- HPFEEDS_SERVER=
- HPFEEDS_IDENT=user
- HPFEEDS_SECRET=pass
- HPFEEDS_PORT=20000
- HPFEEDS_CHANNELPREFIX=prefix
networks:
- mailoney_local
ports:
@ -701,24 +584,6 @@ services:
volumes:
- ${TPOT_DATA_PATH}/medpot/log/:/var/log/medpot
# Miniprint service
miniprint:
container_name: miniprint
restart: always
depends_on:
tpotinit:
condition: service_healthy
networks:
- miniprint_local
ports:
- "9100:9100"
image: ${TPOT_REPO}/miniprint:${TPOT_VERSION}
pull_policy: ${TPOT_PULL_POLICY}
read_only: true
volumes:
- ${TPOT_DATA_PATH}/miniprint/log/:/opt/miniprint/log/
- ${TPOT_DATA_PATH}/miniprint/uploads/:/opt/miniprint/uploads/
# Redishoneypot service
redishoneypot:
container_name: redishoneypot
@ -926,8 +791,6 @@ services:
depends_on:
tpotinit:
condition: service_healthy
networks:
- nginx_local
environment:
- bootstrap.memory_lock=true
- ES_JAVA_OPTS=-Xms2048m -Xmx2048m
@ -956,8 +819,6 @@ services:
depends_on:
elasticsearch:
condition: service_healthy
networks:
- nginx_local
mem_limit: 1g
ports:
- "127.0.0.1:64296:5601"
@ -971,14 +832,11 @@ services:
depends_on:
elasticsearch:
condition: service_healthy
networks:
- nginx_local
environment:
- LS_JAVA_OPTS=-Xms1024m -Xmx1024m
- TPOT_TYPE=${TPOT_TYPE:-HIVE}
- TPOT_HIVE_USER=${TPOT_HIVE_USER}
- TPOT_HIVE_IP=${TPOT_HIVE_IP}
- LS_SSL_VERIFICATION=${LS_SSL_VERIFICATION:-full}
ports:
- "127.0.0.1:64305:64305"
mem_limit: 2g
@ -994,8 +852,6 @@ services:
depends_on:
tpotinit:
condition: service_healthy
networks:
- nginx_local
stop_signal: SIGKILL
tty: true
image: ${TPOT_REPO}/redis:${TPOT_VERSION}
@ -1009,8 +865,6 @@ services:
depends_on:
tpotinit:
condition: service_healthy
networks:
- nginx_local
environment:
- MAP_COMMAND=AttackMapServer.py
stop_signal: SIGKILL
@ -1027,8 +881,6 @@ services:
depends_on:
elasticsearch:
condition: service_healthy
networks:
- nginx_local
environment:
- MAP_COMMAND=DataServer_v2.py
- TPOT_ATTACKMAP_TEXT=${TPOT_ATTACKMAP_TEXT}
@ -1080,11 +932,9 @@ services:
- /var/tmp/nginx/scgi
- /run
- /var/lib/nginx/tmp:uid=100,gid=82
networks:
- nginx_local
network_mode: "host"
ports:
- "64297:64297"
- "64294:64294"
image: ${TPOT_REPO}/nginx:${TPOT_VERSION}
pull_policy: ${TPOT_PULL_POLICY}
read_only: true
@ -1102,7 +952,7 @@ services:
tpotinit:
condition: service_healthy
networks:
- nginx_local
- spiderfoot_local
ports:
- "127.0.0.1:64303:8080"
image: ${TPOT_REPO}/spiderfoot:${TPOT_VERSION}

Binary file not shown.

Before

Width:  |  Height:  |  Size: 443 KiB

After

Width:  |  Height:  |  Size: 382 KiB

View file

@ -1,27 +1,28 @@
# T-Pot: STANDARD
version: '3.9'
networks:
adbhoney_local:
ciscoasa_local:
citrixhoneypot_local:
conpot_local_IEC104:
conpot_local_guardian_ast:
conpot_local_ipmi:
conpot_local_kamstrup_382:
cowrie_local:
ddospot_local:
dicompot_local:
dionaea_local:
elasticpot_local:
h0neytr4p_local:
heralding_local:
honeyaml_local:
ipphoney_local:
mailoney_local:
medpot_local:
miniprint_local:
redishoneypot_local:
sentrypeer_local:
tanner_local:
spiderfoot_local:
wordpot_local:
nginx_local:
ewsposter_local:
services:
@ -96,6 +97,23 @@ services:
volumes:
- ${TPOT_DATA_PATH}/ciscoasa/log:/var/log/ciscoasa
# CitrixHoneypot service
citrixhoneypot:
container_name: citrixhoneypot
restart: always
depends_on:
tpotinit:
condition: service_healthy
networks:
- citrixhoneypot_local
ports:
- "443:443"
image: ${TPOT_REPO}/citrixhoneypot:${TPOT_VERSION}
pull_policy: ${TPOT_PULL_POLICY}
read_only: true
volumes:
- ${TPOT_DATA_PATH}/citrixhoneypot/log:/opt/citrixhoneypot/logs
# Conpot IEC104 service
conpot_IEC104:
container_name: conpot_iec104
@ -222,6 +240,29 @@ services:
- ${TPOT_DATA_PATH}/cowrie/log:/home/cowrie/cowrie/log
- ${TPOT_DATA_PATH}/cowrie/log/tty:/home/cowrie/cowrie/log/tty
# Ddospot service
ddospot:
container_name: ddospot
restart: always
depends_on:
tpotinit:
condition: service_healthy
networks:
- ddospot_local
ports:
- "19:19/udp"
- "53:53/udp"
- "123:123/udp"
# - "161:161/udp"
- "1900:1900/udp"
image: ${TPOT_REPO}/ddospot:${TPOT_VERSION}
pull_policy: ${TPOT_PULL_POLICY}
read_only: true
volumes:
- ${TPOT_DATA_PATH}/ddospot/log:/opt/ddospot/ddospot/logs
- ${TPOT_DATA_PATH}/ddospot/bl:/opt/ddospot/ddospot/bl
- ${TPOT_DATA_PATH}/ddospot/db:/opt/ddospot/ddospot/db
# Dicompot service
# Get the Horos Client for testing: https://horosproject.org/
# Get Dicom images (CC BY 3.0): https://www.cancerimagingarchive.net/collections/
@ -235,7 +276,6 @@ services:
networks:
- dicompot_local
ports:
- "104:11112"
- "11112:11112"
image: ${TPOT_REPO}/dicompot:${TPOT_VERSION}
pull_policy: ${TPOT_PULL_POLICY}
@ -302,25 +342,6 @@ services:
volumes:
- ${TPOT_DATA_PATH}/elasticpot/log:/opt/elasticpot/log
# H0neytr4p service
h0neytr4p:
container_name: h0neytr4p
restart: always
depends_on:
tpotinit:
condition: service_healthy
networks:
- h0neytr4p_local
ports:
- "443:443"
# - "80:80"
image: ${TPOT_REPO}/h0neytr4p:${TPOT_VERSION}
pull_policy: ${TPOT_PULL_POLICY}
read_only: true
volumes:
- ${TPOT_DATA_PATH}/h0neytr4p/log/:/opt/h0neytr4p/log/
- ${TPOT_DATA_PATH}/h0neytr4p/payloads/:/data/h0neytr4p/payloads/
# Heralding service
heralding:
container_name: heralding
@ -355,23 +376,6 @@ services:
volumes:
- ${TPOT_DATA_PATH}/heralding/log:/var/log/heralding
# Honeyaml service
honeyaml:
container_name: honeyaml
restart: always
depends_on:
tpotinit:
condition: service_healthy
networks:
- honeyaml_local
ports:
- "3000:8080"
image: ${TPOT_REPO}/honeyaml:${TPOT_VERSION}
pull_policy: ${TPOT_PULL_POLICY}
read_only: true
volumes:
- ${TPOT_DATA_PATH}/honeyaml/log:/opt/honeyaml/log/
# Honeytrap service
honeytrap:
container_name: honeytrap
@ -412,12 +416,16 @@ services:
# Mailoney service
mailoney:
container_name: mailoney
stdin_open: true
tty: true
restart: always
depends_on:
tpotinit:
condition: service_healthy
environment:
- HPFEEDS_SERVER=
- HPFEEDS_IDENT=user
- HPFEEDS_SECRET=pass
- HPFEEDS_PORT=20000
- HPFEEDS_CHANNELPREFIX=prefix
networks:
- mailoney_local
ports:
@ -446,24 +454,6 @@ services:
volumes:
- ${TPOT_DATA_PATH}/medpot/log/:/var/log/medpot
# Miniprint service
miniprint:
container_name: miniprint
restart: always
depends_on:
tpotinit:
condition: service_healthy
networks:
- miniprint_local
ports:
- "9100:9100"
image: ${TPOT_REPO}/miniprint:${TPOT_VERSION}
pull_policy: ${TPOT_PULL_POLICY}
read_only: true
volumes:
- ${TPOT_DATA_PATH}/miniprint/log/:/opt/miniprint/log/
- ${TPOT_DATA_PATH}/miniprint/uploads/:/opt/miniprint/uploads/
# Redishoneypot service
redishoneypot:
container_name: redishoneypot
@ -596,7 +586,6 @@ services:
ports:
- "8080:80"
image: ${TPOT_REPO}/wordpot:${TPOT_VERSION}
pull_policy: ${TPOT_PULL_POLICY}
read_only: true
volumes:
- ${TPOT_DATA_PATH}/wordpot/log:/opt/wordpot/logs/
@ -671,8 +660,6 @@ services:
depends_on:
tpotinit:
condition: service_healthy
networks:
- nginx_local
environment:
- bootstrap.memory_lock=true
- ES_JAVA_OPTS=-Xms2048m -Xmx2048m
@ -701,8 +688,6 @@ services:
depends_on:
elasticsearch:
condition: service_healthy
networks:
- nginx_local
mem_limit: 1g
ports:
- "127.0.0.1:64296:5601"
@ -716,14 +701,11 @@ services:
depends_on:
elasticsearch:
condition: service_healthy
networks:
- nginx_local
environment:
- LS_JAVA_OPTS=-Xms1024m -Xmx1024m
- TPOT_TYPE=${TPOT_TYPE:-HIVE}
- TPOT_HIVE_USER=${TPOT_HIVE_USER}
- TPOT_HIVE_IP=${TPOT_HIVE_IP}
- LS_SSL_VERIFICATION=${LS_SSL_VERIFICATION:-full}
ports:
- "127.0.0.1:64305:64305"
mem_limit: 2g
@ -739,8 +721,6 @@ services:
depends_on:
tpotinit:
condition: service_healthy
networks:
- nginx_local
stop_signal: SIGKILL
tty: true
image: ${TPOT_REPO}/redis:${TPOT_VERSION}
@ -754,8 +734,6 @@ services:
depends_on:
tpotinit:
condition: service_healthy
networks:
- nginx_local
environment:
- MAP_COMMAND=AttackMapServer.py
stop_signal: SIGKILL
@ -772,8 +750,6 @@ services:
depends_on:
elasticsearch:
condition: service_healthy
networks:
- nginx_local
environment:
- MAP_COMMAND=DataServer_v2.py
- TPOT_ATTACKMAP_TEXT=${TPOT_ATTACKMAP_TEXT}
@ -825,11 +801,9 @@ services:
- /var/tmp/nginx/scgi
- /run
- /var/lib/nginx/tmp:uid=100,gid=82
networks:
- nginx_local
network_mode: "host"
ports:
- "64297:64297"
- "64294:64294"
image: ${TPOT_REPO}/nginx:${TPOT_VERSION}
pull_policy: ${TPOT_PULL_POLICY}
read_only: true
@ -847,7 +821,7 @@ services:
tpotinit:
condition: service_healthy
networks:
- nginx_local
- spiderfoot_local
ports:
- "127.0.0.1:64303:8080"
image: ${TPOT_REPO}/spiderfoot:${TPOT_VERSION}

View file

@ -1,23 +0,0 @@
# T-Pot builder config file. Do not remove.
##########################
# T-Pot Builder Settings #
##########################
# docker compose .env
TPOT_DOCKER_ENV=./.env
# Docker-Compose file
TPOT_DOCKER_COMPOSE=./docker-compose.yml
# T-Pot Repos
TPOT_DOCKER_REPO=dtagdevsec
TPOT_GHCR_REPO=ghcr.io/telekom-security
# T-Pot Version Tag
TPOT_VERSION=24.04.1
# T-Pot platforms (architectures)
# Most docker features are available on linux
TPOT_AMD64=linux/amd64
TPOT_ARM64=linux/arm64

View file

@ -1,202 +0,0 @@
#!/usr/bin/env bash
# Got root?
myWHOAMI=$(whoami)
if [ "$myWHOAMI" != "root" ]
then
echo "Need to run as root ..."
exit
fi
# ANSI color codes for green (OK) and red (FAIL)
GREEN='\033[0;32m'
RED='\033[0;31m'
NC='\033[0m' # No Color
# Default settings
PUSH_IMAGES=false
NO_CACHE=false
PARALLELBUILDS=2
UPLOAD_BANDWIDTH=40mbit # Set this to max 90% of available upload bandwidth
INTERFACE=$(ip route | grep "^default" | awk '{ print $5 }')
# Help message
usage() {
echo "Usage: $0 [-p] [-n] [-h]"
echo " -p Push images after building"
echo " -n Build images with --no-cache"
echo " -h Show help message"
exit 1
}
# Parse command-line options
while getopts ":pnh" opt; do
case ${opt} in
p )
PUSH_IMAGES=true
docker login
docker login ghcr.io
;;
n )
NO_CACHE=true
;;
h )
usage
;;
\? )
echo "Invalid option: $OPTARG" 1>&2
usage
;;
esac
done
# Function to apply upload bandwidth limit using tc
apply_bandwidth_limit() {
echo -n "Applying upload bandwidth limit of $UPLOAD_BANDWIDTH on interface $INTERFACE..."
if tc qdisc add dev $INTERFACE root tbf rate $UPLOAD_BANDWIDTH burst 32kbit latency 400ms >/dev/null 2>&1; then
echo -e " [${GREEN}OK${NC}]"
else
echo -e " [${RED}FAIL${NC}]"
remove_bandwidth_limit
# Try to reapply the limit
echo -n "Reapplying upload bandwidth limit of $UPLOAD_BANDWIDTH on interface $INTERFACE..."
if tc qdisc add dev $INTERFACE root tbf rate $UPLOAD_BANDWIDTH burst 32kbit latency 400ms >/dev/null 2>&1; then
echo -e " [${GREEN}OK${NC}]"
else
echo -e " [${RED}FAIL${NC}]"
echo "Failed to apply bandwidth limit on $INTERFACE. Exiting."
echo
exit 1
fi
fi
}
# Function to check if the bandwidth limit is set
is_bandwidth_limit_set() {
tc qdisc show dev $INTERFACE | grep -q 'tbf'
}
# Function to remove the bandwidth limit using tc if it is set
remove_bandwidth_limit() {
if is_bandwidth_limit_set; then
echo -n "Removing upload bandwidth limit on interface $INTERFACE..."
if tc qdisc del dev $INTERFACE root; then
echo -e " [${GREEN}OK${NC}]"
else
echo -e " [${RED}FAIL${NC}]"
fi
fi
}
echo "###########################"
echo "# T-Pot Image Builder"
echo "###########################"
echo
# Check if 'mybuilder' exists, and ensure it's running with bootstrap
echo -n "Checking if buildx builder 'mybuilder' exists and is running..."
if ! docker buildx inspect mybuilder --bootstrap >/dev/null 2>&1; then
echo
echo -n " Creating and starting buildx builder 'mybuilder'..."
if docker buildx create --name mybuilder --driver docker-container --use >/dev/null 2>&1 && \
docker buildx inspect mybuilder --bootstrap >/dev/null 2>&1; then
echo -e " [${GREEN}OK${NC}]"
else
echo -e " [${RED}FAIL${NC}]"
exit 1
fi
else
echo -e " [${GREEN}OK${NC}]"
fi
# Ensure arm64 and amd64 platforms are active
echo -n "Ensuring 'mybuilder' supports linux/arm64 and linux/amd64..."
# Get active platforms from buildx
active_platforms=$(docker buildx inspect mybuilder --bootstrap | grep -oP '(?<=Platforms: ).*')
if [[ "$active_platforms" == *"linux/arm64"* && "$active_platforms" == *"linux/amd64"* ]]; then
echo -e " [${GREEN}OK${NC}]"
else
echo
echo -n " Enabling platforms linux/arm64 and linux/amd64..."
if docker buildx create --name mybuilder --driver docker-container --use --platform linux/amd64,linux/arm64 >/dev/null 2>&1 && \
docker buildx inspect mybuilder --bootstrap >/dev/null 2>&1; then
echo -e " [${GREEN}OK${NC}]"
else
echo -e " [${RED}FAIL${NC}]"
exit 1
fi
fi
# Ensure QEMU is set up for cross-platform builds
echo -n "Ensuring QEMU is configured for cross-platform builds..."
if docker run --rm --privileged multiarch/qemu-user-static --reset -p yes > /dev/null 2>&1; then
echo -e " [${GREEN}OK${NC}]"
else
echo -e " [${RED}FAIL${NC}]"
fi
# Apply bandwidth limit only if pushing images
if $PUSH_IMAGES; then
echo
echo "########################################"
echo "# Setting Upload Bandwidth limit ..."
echo "########################################"
echo
apply_bandwidth_limit
fi
# Trap to ensure bandwidth limit is removed on script error, exit
trap_cleanup() {
if is_bandwidth_limit_set; then
remove_bandwidth_limit
fi
}
trap trap_cleanup INT ERR EXIT
echo
echo "################################"
echo "# Now building images ..."
echo "################################"
echo
mkdir -p log
# List of services to build
services=$(docker compose config --services | sort)
# Loop through each service to build
echo $services | tr ' ' '\n' | xargs -I {} -P $PARALLELBUILDS bash -c '
echo "Building image: {}" && \
build_cmd="docker compose build {}" && \
if '$PUSH_IMAGES'; then \
build_cmd="$build_cmd --push"; \
fi && \
if '$NO_CACHE'; then \
build_cmd="$build_cmd --no-cache"; \
fi && \
eval "$build_cmd 2>&1 > log/{}.log" && \
echo -e "Image {}: ['$GREEN'OK'$NC']" || \
echo -e "Image {}: ['$RED'FAIL'$NC']"
'
# Remove bandwidth limit if it was applied
if is_bandwidth_limit_set; then
echo
echo "########################################"
echo "# Removiong Upload Bandwidth limit ..."
echo "########################################"
echo
remove_bandwidth_limit
fi
echo
echo "#######################################################"
echo "# Done."
if ! "$PUSH_IMAGES"; then
echo "# Remeber to push the images using push option."
fi
echo "#######################################################"
echo

View file

@ -1,421 +0,0 @@
# T-Pot Docker Compose Image Builder (use only for building docker images)
# Settings in .env
##################
#### Anchors
##################
# Common build config
x-common-build: &common-build
dockerfile: ./Dockerfile
platforms:
- ${TPOT_AMD64}
- ${TPOT_ARM64}
services:
##################
#### Honeypots
##################
# Adbhoney
adbhoney:
image: ${TPOT_DOCKER_REPO}/adbhoney:${TPOT_VERSION}
build:
tags:
- ${TPOT_GHCR_REPO}/adbhoney:${TPOT_VERSION}
context: ../adbhoney/
<<: *common-build
# Beelzebub
beelzebub:
image: ${TPOT_DOCKER_REPO}/beelzebub:${TPOT_VERSION}
build:
tags:
- ${TPOT_GHCR_REPO}/beelzebub:${TPOT_VERSION}
context: ../beelzebub/
<<: *common-build
# Ciscoasa
ciscoasa:
image: ${TPOT_DOCKER_REPO}/ciscoasa:${TPOT_VERSION}
build:
tags:
- ${TPOT_GHCR_REPO}/ciscoasa:${TPOT_VERSION}
context: ../ciscoasa/
<<: *common-build
# Citrixhoneypot
citrixhoneypot:
image: ${TPOT_DOCKER_REPO}/citrixhoneypot:${TPOT_VERSION}
build:
tags:
- ${TPOT_GHCR_REPO}/citrixhoneypot:${TPOT_VERSION}
context: ../citrixhoneypot/
<<: *common-build
# Conpot
conpot:
image: ${TPOT_DOCKER_REPO}/conpot:${TPOT_VERSION}
build:
tags:
- ${TPOT_GHCR_REPO}/conpot:${TPOT_VERSION}
context: ../conpot/
<<: *common-build
# Cowrie
cowrie:
image: ${TPOT_DOCKER_REPO}/cowrie:${TPOT_VERSION}
build:
tags:
- ${TPOT_GHCR_REPO}/cowrie:${TPOT_VERSION}
context: ../cowrie/
<<: *common-build
# Ddospot
ddospot:
image: ${TPOT_DOCKER_REPO}/ddospot:${TPOT_VERSION}
build:
tags:
- ${TPOT_GHCR_REPO}/ddospot:${TPOT_VERSION}
context: ../ddospot/
<<: *common-build
# Dicompot
dicompot:
image: ${TPOT_DOCKER_REPO}/dicompot:${TPOT_VERSION}
build:
tags:
- ${TPOT_GHCR_REPO}/dicompot:${TPOT_VERSION}
context: ../dicompot/
<<: *common-build
# Dionaea
dionaea:
image: ${TPOT_DOCKER_REPO}/dionaea:${TPOT_VERSION}
build:
tags:
- ${TPOT_GHCR_REPO}/dionaea:${TPOT_VERSION}
context: ../dionaea/
<<: *common-build
# Elasticpot
elasticpot:
image: ${TPOT_DOCKER_REPO}/elasticpot:${TPOT_VERSION}
build:
tags:
- ${TPOT_GHCR_REPO}/elasticpot:${TPOT_VERSION}
context: ../elasticpot/
<<: *common-build
# Endlessh
endlessh:
image: ${TPOT_DOCKER_REPO}/endlessh:${TPOT_VERSION}
build:
tags:
- ${TPOT_GHCR_REPO}/endlessh:${TPOT_VERSION}
context: ../endlessh/
<<: *common-build
# Galah
galah:
image: ${TPOT_DOCKER_REPO}/galah:${TPOT_VERSION}
build:
tags:
- ${TPOT_GHCR_REPO}/galah:${TPOT_VERSION}
context: ../galah/
<<: *common-build
# Glutton
glutton:
image: ${TPOT_DOCKER_REPO}/glutton:${TPOT_VERSION}
build:
tags:
- ${TPOT_GHCR_REPO}/glutton:${TPOT_VERSION}
context: ../glutton/
<<: *common-build
# Go-pot
go-pot:
image: ${TPOT_DOCKER_REPO}/go-pot:${TPOT_VERSION}
build:
tags:
- ${TPOT_GHCR_REPO}/go-pot:${TPOT_VERSION}
context: ../go-pot/
<<: *common-build
# H0neytr4p
h0neytr4p:
image: ${TPOT_DOCKER_REPO}/h0neytr4p:${TPOT_VERSION}
build:
tags:
- ${TPOT_GHCR_REPO}/h0neytr4p:${TPOT_VERSION}
context: ../h0neytr4p/
<<: *common-build
# Hellpot
hellpot:
image: ${TPOT_DOCKER_REPO}/hellpot:${TPOT_VERSION}
build:
tags:
- ${TPOT_GHCR_REPO}/hellpot:${TPOT_VERSION}
context: ../hellpot/
<<: *common-build
# Herlading
heralding:
image: ${TPOT_DOCKER_REPO}/heralding:${TPOT_VERSION}
build:
tags:
- ${TPOT_GHCR_REPO}/heralding:${TPOT_VERSION}
context: ../heralding/
<<: *common-build
# Honeyaml
honeyaml:
image: ${TPOT_DOCKER_REPO}/honeyaml:${TPOT_VERSION}
build:
tags:
- ${TPOT_GHCR_REPO}/honeyaml:${TPOT_VERSION}
context: ../honeyaml/
<<: *common-build
# Honeypots
honeypots:
image: ${TPOT_DOCKER_REPO}/honeypots:${TPOT_VERSION}
build:
tags:
- ${TPOT_GHCR_REPO}/honeypots:${TPOT_VERSION}
context: ../honeypots/
<<: *common-build
# Honeytrap
honeytrap:
image: ${TPOT_DOCKER_REPO}/honeytrap:${TPOT_VERSION}
build:
tags:
- ${TPOT_GHCR_REPO}/honeytrap:${TPOT_VERSION}
context: ../honeytrap/
<<: *common-build
# Ipphoney
ipphoney:
image: ${TPOT_DOCKER_REPO}/ipphoney:${TPOT_VERSION}
build:
tags:
- ${TPOT_GHCR_REPO}/ipphoney:${TPOT_VERSION}
context: ../ipphoney/
<<: *common-build
# Log4pot
log4pot:
image: ${TPOT_DOCKER_REPO}/log4pot:${TPOT_VERSION}
build:
tags:
- ${TPOT_GHCR_REPO}/log4pot:${TPOT_VERSION}
context: ../log4pot/
<<: *common-build
# Mailoney
mailoney:
image: ${TPOT_DOCKER_REPO}/mailoney:${TPOT_VERSION}
build:
tags:
- ${TPOT_GHCR_REPO}/mailoney:${TPOT_VERSION}
context: ../mailoney/
<<: *common-build
# Medpot
medpot:
image: ${TPOT_DOCKER_REPO}/medpot:${TPOT_VERSION}
build:
tags:
- ${TPOT_GHCR_REPO}/medpot:${TPOT_VERSION}
context: ../medpot/
<<: *common-build
# Miniprint
miniprint:
image: ${TPOT_DOCKER_REPO}/miniprint:${TPOT_VERSION}
build:
tags:
- ${TPOT_GHCR_REPO}/miniprint:${TPOT_VERSION}
context: ../miniprint/
<<: *common-build
# Redishoneypot
redishoneypot:
image: ${TPOT_DOCKER_REPO}/redishoneypot:${TPOT_VERSION}
build:
tags:
- ${TPOT_GHCR_REPO}/redishoneypot:${TPOT_VERSION}
context: ../redishoneypot/
<<: *common-build
# Sentrypeer
sentrypeer:
image: ${TPOT_DOCKER_REPO}/sentrypeer:${TPOT_VERSION}
build:
tags:
- ${TPOT_GHCR_REPO}/sentrypeer:${TPOT_VERSION}
context: ../sentrypeer/
<<: *common-build
#### Snare / Tanner
## Tanner Redis
redis:
image: ${TPOT_DOCKER_REPO}/redis:${TPOT_VERSION}
build:
tags:
- ${TPOT_GHCR_REPO}/redis:${TPOT_VERSION}
context: ../tanner/redis/
<<: *common-build
## PHP Sandbox
phpox:
image: ${TPOT_DOCKER_REPO}/phpox:${TPOT_VERSION}
build:
tags:
- ${TPOT_GHCR_REPO}/phpox:${TPOT_VERSION}
context: ../tanner/phpox/
<<: *common-build
## Tanner
tanner:
image: ${TPOT_DOCKER_REPO}/tanner:${TPOT_VERSION}
build:
tags:
- ${TPOT_GHCR_REPO}/tanner:${TPOT_VERSION}
context: ../tanner/tanner/
<<: *common-build
## Snare
snare:
image: ${TPOT_DOCKER_REPO}/snare:${TPOT_VERSION}
build:
tags:
- ${TPOT_GHCR_REPO}/snare:${TPOT_VERSION}
context: ../tanner/snare/
<<: *common-build
####
# Wordpot
wordpot:
image: ${TPOT_DOCKER_REPO}/wordpot:${TPOT_VERSION}
build:
tags:
- ${TPOT_GHCR_REPO}/wordpot:${TPOT_VERSION}
context: ../wordpot/
<<: *common-build
##################
#### NSM
##################
# Fatt
fatt:
image: ${TPOT_DOCKER_REPO}/fatt:${TPOT_VERSION}
build:
tags:
- ${TPOT_GHCR_REPO}/fatt:${TPOT_VERSION}
context: ../fatt/
<<: *common-build
# P0f
p0f:
image: ${TPOT_DOCKER_REPO}/p0f:${TPOT_VERSION}
build:
tags:
- ${TPOT_GHCR_REPO}/p0f:${TPOT_VERSION}
context: ../p0f/
<<: *common-build
# Suricata
suricata:
image: ${TPOT_DOCKER_REPO}/suricata:${TPOT_VERSION}
build:
tags:
- ${TPOT_GHCR_REPO}/suricata:${TPOT_VERSION}
context: ../suricata/
<<: *common-build
##################
#### Tools
##################
# T-Pot Init
tpotinit:
image: ${TPOT_DOCKER_REPO}/tpotinit:${TPOT_VERSION}
build:
tags:
- ${TPOT_GHCR_REPO}/tpotinit:${TPOT_VERSION}
context: ../tpotinit/
<<: *common-build
#### ELK
## Elasticsearch
elasticsearch:
image: ${TPOT_DOCKER_REPO}/elasticsearch:${TPOT_VERSION}
build:
tags:
- ${TPOT_GHCR_REPO}/elasticsearch:${TPOT_VERSION}
context: ../elk/elasticsearch/
<<: *common-build
## Kibana
kibana:
image: ${TPOT_DOCKER_REPO}/kibana:${TPOT_VERSION}
build:
tags:
- ${TPOT_GHCR_REPO}/kibana:${TPOT_VERSION}
context: ../elk/kibana/
<<: *common-build
## Logstash
logstash:
image: ${TPOT_DOCKER_REPO}/logstash:${TPOT_VERSION}
build:
tags:
- ${TPOT_GHCR_REPO}/logstash:${TPOT_VERSION}
context: ../elk/logstash/
<<: *common-build
## Map Web
map:
image: ${TPOT_DOCKER_REPO}/map:${TPOT_VERSION}
build:
tags:
- ${TPOT_GHCR_REPO}/map:${TPOT_VERSION}
context: ../elk/map/
<<: *common-build
####
# Ewsposter
ewsposter:
image: ${TPOT_DOCKER_REPO}/ewsposter:${TPOT_VERSION}
build:
tags:
- ${TPOT_GHCR_REPO}/ewsposter:${TPOT_VERSION}
context: ../ewsposter/
<<: *common-build
# Nginx
nginx:
image: ${TPOT_DOCKER_REPO}/nginx:${TPOT_VERSION}
build:
tags:
- ${TPOT_GHCR_REPO}/nginx:${TPOT_VERSION}
context: ../nginx/
<<: *common-build
# Spiderfoot
spiderfoot:
image: ${TPOT_DOCKER_REPO}/spiderfoot:${TPOT_VERSION}
build:
tags:
- ${TPOT_GHCR_REPO}/spiderfoot:${TPOT_VERSION}
context: ../spiderfoot/
<<: *common-build

View file

@ -1,99 +0,0 @@
#!/usr/bin/env bash
# ANSI color codes for green (OK) and red (FAIL)
BLUE='\033[0;34m'
GREEN='\033[0;32m'
RED='\033[0;31m'
NC='\033[0m' # No Color
# Check if the user is in the docker group
if ! groups $(whoami) | grep &>/dev/null '\bdocker\b'; then
echo -e "${RED}You need to be in the docker group to run this script without root privileges.${NC}"
echo "Please run the following command to add yourself to the docker group:"
echo " sudo usermod -aG docker $(whoami)"
echo "Then log out and log back in or run the script with sudo."
exit 1
fi
# Command-line switch check
if [ "$1" != "-y" ]; then
echo "### Setting up Docker for Multi-Arch Builds."
echo "### Requires Docker packages from https://get.docker.com/"
echo "### Use on x64 only!"
echo "### Run with -y if you fit the requirements!"
exit 0
fi
# Check if the mybuilder exists and is running
echo -n "Checking if buildx builder 'mybuilder' exists and is running..."
if ! docker buildx inspect mybuilder --bootstrap >/dev/null 2>&1; then
echo
echo -n " Creating and starting buildx builder 'mybuilder'..."
if docker buildx create --name mybuilder --driver docker-container --use >/dev/null 2>&1 && \
docker buildx inspect mybuilder --bootstrap >/dev/null 2>&1; then
echo -e " [${GREEN}OK${NC}]"
else
echo -e " [${RED}FAIL${NC}]"
exit 1
fi
else
echo -e " [${GREEN}OK${NC}]"
fi
# Ensure QEMU is set up for cross-platform builds
echo -n "Ensuring QEMU is configured for cross-platform builds..."
if docker run --rm --privileged multiarch/qemu-user-static --reset -p yes >/dev/null 2>&1; then
echo -e " [${GREEN}OK${NC}]"
else
echo -e " [${RED}FAIL${NC}]"
exit 1
fi
# Ensure arm64 and amd64 platforms are active
echo -n "Ensuring 'mybuilder' supports linux/arm64 and linux/amd64..."
active_platforms=$(docker buildx inspect mybuilder --bootstrap | grep -oP '(?<=Platforms: ).*')
if [[ "$active_platforms" == *"linux/arm64"* && "$active_platforms" == *"linux/amd64"* ]]; then
echo -e " [${GREEN}OK${NC}]"
else
echo
echo -n " Enabling platforms linux/arm64 and linux/amd64..."
if docker buildx create --name mybuilder --driver docker-container --use --platform linux/amd64,linux/arm64 >/dev/null 2>&1 && \
docker buildx inspect mybuilder --bootstrap >/dev/null 2>&1; then
echo -e " [${GREEN}OK${NC}]"
else
echo -e " [${RED}FAIL${NC}]"
exit 1
fi
fi
echo
echo -e "${BLUE}### Done.${NC}"
echo
echo -e "${BLUE}Examples:${NC}"
echo -e " ${BLUE}Manual multi-arch build:${NC}"
echo " docker buildx build --platform linux/amd64,linux/arm64 -t username/demo:latest --push ."
echo
echo -e " ${BLUE}Documentation:${NC} https://docs.docker.com/desktop/multi-arch/"
echo
echo -e " ${BLUE}Build release with Docker Compose:${NC}"
echo " docker compose build"
echo
echo -e " ${BLUE}Build and push release with Docker Compose:${NC}"
echo " docker compose build --push"
echo
echo -e " ${BLUE}Build a single image with Docker Compose:${NC}"
echo " docker compose build tpotinit"
echo
echo -e " ${BLUE}Build and push a single image with Docker Compose:${NC}"
echo " docker compose build tpotinit --push"
echo
echo -e "${BLUE}Resolve buildx issues:${NC}"
echo " docker buildx create --use --name mybuilder"
echo " docker buildx inspect mybuilder --bootstrap"
echo " docker login -u <username>"
echo " docker login ghcr.io -u <username>"
echo
echo -e "${BLUE}Fix segmentation faults when building arm64 images:${NC}"
echo " docker run --rm --privileged multiarch/qemu-user-static --reset -p yes"
echo

View file

@ -1,35 +1,39 @@
FROM alpine:3.20 AS builder
FROM alpine:3.19
#
# Include dist
COPY dist/ /root/dist/
#
# Install packages
RUN apk --no-cache -U upgrade && \
apk --no-cache -U add \
build-base \
RUN apk --no-cache -U add \
git \
procps \
py3-psutil \
py3-requests \
py3-pip \
python3 && \
pip3 install --break-system-packages pyinstaller && \
#
# Install adbhoney from git
git clone https://github.com/t3chn0m4g3/ADBHoney /opt/adbhoney && \
git clone https://github.com/huuck/ADBHoney /opt/adbhoney && \
cd /opt/adbhoney && \
git checkout 42a73cd8a82ddd4d137de70ac37b1a8b2e3e0119 && \
# git checkout 2417a7a982f4fd527b3a048048df9a23178767ad && \
git checkout 42afd98611724ca3d694a48b694c957e8d953db4 && \
cp /root/dist/adbhoney.cfg /opt/adbhoney && \
cp /root/dist/cpu_check.py / && \
sed -i 's/dst_ip/dest_ip/' /opt/adbhoney/adbhoney/core.py && \
sed -i 's/dst_port/dest_port/' /opt/adbhoney/adbhoney/core.py && \
pyinstaller adbhoney.spec
#
FROM alpine:3.20
RUN apk --no-cache -U upgrade
COPY --from=builder /opt/adbhoney/dist/adbhoney/ /opt/adbhoney/
# Setup user, groups and configs
addgroup -g 2000 adbhoney && \
adduser -S -H -s /bin/ash -u 2000 -D -g 2000 adbhoney && \
chown -R adbhoney:adbhoney /opt/adbhoney && \
#
# Clean up
apk del --purge git && \
rm -rf /root/* /opt/adbhoney/.git /var/cache/apk/*
#
# Set workdir and start adbhoney
STOPSIGNAL SIGINT
USER 2000:2000
# Adbhoney sometimes hangs at 100% CPU usage, if detected container will become unhealthy and restarted by tpotinit
HEALTHCHECK --interval=5m --timeout=30s --retries=3 CMD python3 /cpu_check.py $(pgrep -of run.py) 99
USER adbhoney:adbhoney
WORKDIR /opt/adbhoney/
CMD ["./adbhoney"]
CMD /usr/bin/python3 run.py

View file

@ -1,3 +1,5 @@
version: '2.3'
networks:
adbhoney_local:

View file

@ -1,31 +0,0 @@
FROM golang:1.23-alpine AS builder
#
ENV GO111MODULE=on \
CGO_ENABLED=0 \
GOOS=linux
#
# Install packages
RUN apk -U add git
#
WORKDIR /root
#
# Build beelzebub
RUN git clone https://github.com/t3chn0m4g3/beelzebub && \
cd beelzebub && \
git checkout 0b9aba53ec1671f669d22782758142a1d411b858
WORKDIR /root/beelzebub
RUN go mod download
RUN go build -o main .
RUN sed -i "s#logsPath: ./log#logsPath: ./configurations/log/beelzebub.json#g" /root/beelzebub/configurations/beelzebub.yaml
RUN sed -i 's/passwordRegex: "^(root|qwerty|Smoker666|123456|jenkins|minecraft|sinus|alex|postgres|Ly123456)$"/passwordRegex: ".*"/g' /root/beelzebub/configurations/services/ssh-22.yaml
#
FROM scratch
#
COPY --from=builder /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/
COPY --from=builder /root/beelzebub/main /opt/beelzebub/
COPY --from=builder /root/beelzebub/configurations /opt/beelzebub/configurations
#
# Start beelzebub
WORKDIR /opt/beelzebub
USER 2000:2000
ENTRYPOINT ["./main"]

View file

@ -1,29 +0,0 @@
networks:
beelzebub_local:
services:
# Beelzebub service
beelzebub:
build: .
container_name: beelzebub
restart: always
# cpu_count: 1
# cpus: 0.25
networks:
- beelzebub_local
ports:
- "22:22"
- "80:80"
- "2222:2222"
- "3306:3306"
- "8080:8080"
environment:
LLM_MODEL: "ollama"
LLM_HOST: "http://ollama.local:11434/api/chat"
OLLAMA_MODEL: "openchat"
image: "ghcr.io/telekom-security/beelzebub:24.04.1"
read_only: true
volumes:
- $HOME/tpotce/data/beelzebub/key:/opt/beelzebub/configurations/key
- $HOME/tpotce/data/beelzebub/log:/opt/beelzebub/configurations/log

119
docker/builder.sh Executable file
View file

@ -0,0 +1,119 @@
#!/bin/bash
# T-Pot multi-arch (amd64/arm64) image builder based on docker buildx.
# Buildx Example: docker buildx build --platform linux/amd64,linux/arm64 -t username/demo:latest --push .
# Usage: builder.sh [build|push]
#
# Setup Vars
myPLATFORMS="linux/amd64,linux/arm64"
myHUBORG_DOCKER="dtagdevsec"
myHUBORG_GITHUB="ghcr.io/telekom-security"
myTAG="24.04"
myIMAGESBASE="tpotinit adbhoney ciscoasa citrixhoneypot conpot cowrie ddospot dicompot dionaea elasticpot endlessh ewsposter fatt glutton hellpot heralding honeypots honeytrap ipphoney log4pot mailoney medpot nginx p0f redishoneypot sentrypeer spiderfoot suricata wordpot"
myIMAGESELK="elasticsearch kibana logstash map"
myIMAGESTANNER="phpox redis snare tanner"
myBUILDERLOG="builder.log"
myBUILDERERR="builder.err"
myBUILDCACHE="/buildcache"

# Got root?
myWHOAMI=$(whoami)
if [ "$myWHOAMI" != "root" ];
  then
    echo "Need to run as root ..."
    # FIX: exit with a non-zero status so callers can detect the failure
    # (the original used a bare `exit`, i.e. status 0).
    exit 1
fi

# Check for Buildx
docker buildx > /dev/null 2>&1
if [ "$?" == "1" ];
  then
    echo "### Build environment not setup. Install docker engine from docker:"
    echo "### https://docs.docker.com/engine/install/debian/"
    # FIX: abort here — the original only printed the hint and fell through,
    # failing later with confusing buildx errors.
    exit 1
fi

# Let's ensure arm64 and amd64 are supported
echo "### Let's ensure ARM64 and AMD64 are supported ..."
myARCHITECTURES="amd64 arm64"
mySUPPORTED=$(docker buildx inspect --bootstrap)

for i in $myARCHITECTURES;
  do
    if ! echo "$mySUPPORTED" | grep -q "linux/$i";
      then
        echo "## Installing $i support ..."
        docker run --privileged --rm tonistiigi/binfmt --install "$i"
        docker buildx inspect --bootstrap
      else
        echo "## $i support detected!"
    fi
done
echo

# Let's ensure we have builder created with cache support
echo "### Checking for mybuilder ..."
if ! docker buildx ls | grep -q mybuilder;
  then
    echo "## Setting up mybuilder ..."
    docker buildx create --name mybuilder
    # Set as default, otherwise local cache is not supported
    docker buildx use mybuilder
    docker buildx inspect --bootstrap
  else
    echo "## Found mybuilder!"
fi
echo

# Only run with command switch
if [ "$1" == "" ];
  then
    echo "### T-Pot Multi Arch Image Builder."
    echo "## Usage: builder.sh [build, push]"
    echo "## build - Just build images, do not push."
    echo "## push - Build and push images."
    echo "## Pushing requires an active docker login."
    exit 0
fi

# fuBUILDIMAGES <path-prefix> <image-list> <push-option>
# Builds (and with "--push" also pushes) every image in <image-list> located
# under <path-prefix>, tagging for both registries and logging to
# $myBUILDERLOG; failures are additionally recorded in $myBUILDERERR.
fuBUILDIMAGES () {
  local myPATH="$1"
  local myIMAGELIST="$2"
  local myPUSHOPTION="$3"

  for myREPONAME in $myIMAGELIST;
    do
      echo -n "Now building: $myREPONAME in $myPATH$myREPONAME/."
      docker buildx build --cache-from "type=local,src=$myBUILDCACHE" \
                          --cache-to "type=local,dest=$myBUILDCACHE" \
                          --platform $myPLATFORMS \
                          -t $myHUBORG_DOCKER/$myREPONAME:$myTAG \
                          -t $myHUBORG_GITHUB/$myREPONAME:$myTAG \
                          $myPUSHOPTION $myPATH$myREPONAME/. >> "$myBUILDERLOG" 2>&1
      if [ "$?" != "0" ];
        then
          echo " [ ERROR ] - Check logs!"
          echo "Error building $myREPONAME" >> "$myBUILDERERR"
        else
          echo " [ OK ]"
      fi
  done
}

# FIX: the original duplicated the whole build and push sections verbatim;
# they only differ in the push option, so share one code path.
if [ "$1" == "build" ] || [ "$1" == "push" ];
  then
    myPUSHOPTION=""
    if [ "$1" == "push" ];
      then
        myPUSHOPTION="--push"
    fi
    mkdir -p "$myBUILDCACHE"
    rm -f "$myBUILDERLOG" "$myBUILDERERR"
    echo "### Building ${myPUSHOPTION:+and pushing }images ..."
    fuBUILDIMAGES "" "$myIMAGESBASE" "$myPUSHOPTION"
    fuBUILDIMAGES "elk/" "$myIMAGESELK" "$myPUSHOPTION"
    fuBUILDIMAGES "tanner/" "$myIMAGESTANNER" "$myPUSHOPTION"
fi

View file

@ -1,36 +1,48 @@
FROM alpine:3.20 AS builder
FROM alpine:3.19
#
# Install packages
# Include dist
COPY dist/ /root/dist/
#
# Setup env and apt
RUN apk --no-cache -U upgrade && \
apk --no-cache -U add \
build-base \
apk --no-cache add build-base \
git \
libffi \
libffi-dev \
openssl \
openssl-dev \
py3-cryptography \
py3-pip \
python3 \
python3-dev && \
#
# Get and install packages
mkdir -p /opt/ && \
cd /opt/ && \
git clone https://github.com/t3chn0m4g3/ciscoasa_honeypot && \
cd ciscoasa_honeypot && \
git checkout 4bd2795cfa14320a87c00b7159fa3b7d6a8ba254 && \
sed -i "s/git+git/git+https/g" requirements.txt && \
pip3 install --break-system-packages pyinstaller && \
pip3 install --break-system-packages --no-cache-dir -r requirements.txt
WORKDIR /opt/ciscoasa_honeypot
RUN pyinstaller asa_server.py --add-data "./asa:./asa"
# Setup user
addgroup -g 2000 ciscoasa && \
adduser -S -s /bin/bash -u 2000 -D -g 2000 ciscoasa && \
#
FROM alpine:3.20
RUN apk --no-cache -U upgrade
COPY --from=builder /opt/ciscoasa_honeypot/dist/ /opt/
# Get and install packages
mkdir -p /opt/ && \
cd /opt/ && \
git clone https://github.com/cymmetria/ciscoasa_honeypot && \
cd ciscoasa_honeypot && \
git checkout d6e91f1aab7fe6fc01fabf2046e76b68dd6dc9e2 && \
sed -i "s/git+git/git+https/g" requirements.txt && \
pip3 install --break-system-packages --no-cache-dir -r requirements.txt && \
cp /root/dist/asa_server.py /opt/ciscoasa_honeypot && \
chown -R ciscoasa:ciscoasa /opt/ciscoasa_honeypot && \
#
# Clean up
apk del --purge build-base \
git \
libffi-dev \
openssl-dev \
python3-dev && \
rm -rf /root/* && \
rm -rf /opt/ciscoasa_honeypot/.git && \
rm -rf /var/cache/apk/*
#
# Start ciscoasa
STOPSIGNAL SIGINT
WORKDIR /opt/asa_server/
USER 2000:2000
CMD ./asa_server --ike-port 5000 --enable_ssl --port 8443 --verbose >> /var/log/ciscoasa/ciscoasa.log 2>&1
WORKDIR /tmp/ciscoasa/
USER ciscoasa:ciscoasa
CMD cp -R /opt/ciscoasa_honeypot/* /tmp/ciscoasa && exec python3 asa_server.py --ike-port 5000 --enable_ssl --port 8443 --verbose >> /var/log/ciscoasa/ciscoasa.log 2>&1

307
docker/ciscoasa/dist/asa_server.py vendored Normal file
View file

@ -0,0 +1,307 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import time
import socket
import logging
logging.basicConfig(format='%(message)s')
import threading
from io import BytesIO
from xml.etree import ElementTree
from http.server import HTTPServer
from socketserver import ThreadingMixIn
from http.server import SimpleHTTPRequestHandler
import ike_server
import datetime
class NonBlockingHTTPServer(ThreadingMixIn, HTTPServer):
    """HTTPServer variant that serves each request in its own thread
    (via ThreadingMixIn), so one slow client cannot block the listener."""
    pass
class hpflogger:
    """Optional hpfeeds event publisher.

    Stays disabled (``self.hpc is None``) unless every connection parameter
    is provided; in that case :meth:`log` is a no-op.
    """

    def __init__(self, hpfserver, hpfport, hpfident, hpfsecret, hpfchannel, serverid, verbose):
        # Connection parameters; any falsy value disables publishing.
        self.hpfserver = hpfserver
        self.hpfport = hpfport
        self.hpfident = hpfident
        self.hpfsecret = hpfsecret
        self.hpfchannel = hpfchannel
        self.serverid = serverid
        self.hpc = None
        self.verbose = verbose
        # FIX: the original condition tested self.hpfport twice and never
        # tested self.hpfsecret, so a missing secret went undetected here.
        if (self.hpfserver and self.hpfport and self.hpfident and self.hpfsecret and self.hpfchannel and self.serverid):
            import hpfeeds
            try:
                self.hpc = hpfeeds.new(self.hpfserver, self.hpfport, self.hpfident, self.hpfsecret)
                logger.debug("Logging to hpfeeds using server: {0}, channel {1}.".format(self.hpfserver, self.hpfchannel))
            except (hpfeeds.FeedException, socket.error, hpfeeds.Disconnect):
                logger.critical("hpfeeds connection not successful")

    def log(self, level, message):
        """Publish `message` on the configured channel; drops debug/info
        messages unless verbose mode is on. No-op when not connected."""
        if self.hpc:
            if level in ['debug', 'info'] and not self.verbose:
                return
            self.hpc.publish(self.hpfchannel, "["+self.serverid+"] ["+level+"] ["+datetime.datetime.now().isoformat() +"] " + str(message))
def header_split(h):
    """Parse a blob of header text into ``[[name, value], ...]`` pairs.

    Each non-empty line is split once on ``': '``; both halves are stripped
    of surrounding whitespace. Lines without a ``': '`` separator yield a
    single-element list.
    """
    pairs = []
    for line in h.strip().splitlines():
        pairs.append([part.strip() for part in line.split(': ', 1)])
    return pairs
class WebLogicHandler(SimpleHTTPRequestHandler):
    """HTTP request handler that mimics a Cisco ASA WebVPN portal and
    detects CVE-2018-0101-style POST payloads (see ``do_POST``)."""

    # Injected at startup by __main__: module logger, hpfeeds logger and the
    # alert callback fired when an exploit payload is seen.
    logger = None
    hpfl = None

    protocol_version = "HTTP/1.1"

    # Marker element present in CVE-2018-0101 exploit requests.
    EXPLOIT_STRING = b"host-scan-reply"
    RESPONSE = b"""<?xml version="1.0" encoding="UTF-8"?>
<config-auth client="vpn" type="complete">
<version who="sg">9.0(1)</version>
<error id="98" param1="" param2="">VPN Server could not parse request.</error>
</config-auth>"""

    # Static portal assets are served from the "asa" directory next to this file.
    basepath = os.path.dirname(os.path.abspath(__file__))

    alert_function = None

    def setup(self):
        # Standard handler setup plus a short socket timeout so a stalled
        # client cannot hold a handler thread indefinitely.
        SimpleHTTPRequestHandler.setup(self)
        self.request.settimeout(3)

    def send_header(self, keyword, value):
        # Suppress the default "Server:" header to avoid fingerprinting the
        # honeypot as a Python HTTP server.
        if keyword.lower() == 'server':
            return
        SimpleHTTPRequestHandler.send_header(self, keyword, value)

    def send_head(self):
        # send_head will return a file object that do_HEAD/GET will use
        # do_GET/HEAD are already implemented by SimpleHTTPRequestHandler
        filename = os.path.basename(self.path.rstrip('/').split('?', 1)[0])
        if self.path == '/':
            # Root: emit ASA-style cookie-clearing headers and a tiny page
            # that JS-redirects to the logon page.
            self.send_response(200)
            for k, v in header_split("""
Content-Type: text/html
Cache-Control: no-cache
Pragma: no-cache
Set-Cookie: tg=; expires=Thu, 01 Jan 1970 22:00:00 GMT; path=/; secure
Set-Cookie: webvpn=; expires=Thu, 01 Jan 1970 22:00:00 GMT; path=/; secure
Set-Cookie: webvpnc=; expires=Thu, 01 Jan 1970 22:00:00 GMT; path=/; secure
Set-Cookie: webvpn_portal=; expires=Thu, 01 Jan 1970 22:00:00 GMT; path=/; secure
Set-Cookie: webvpnSharePoint=; expires=Thu, 01 Jan 1970 22:00:00 GMT; path=/; secure
Set-Cookie: webvpnlogin=1; path=/; secure
Set-Cookie: sdesktop=; expires=Thu, 01 Jan 1970 22:00:00 GMT; path=/; secure
"""):
                self.send_header(k, v)
            self.end_headers()
            return BytesIO(b'<html><script>document.location.replace("/+CSCOE+/logon.html")</script></html>\n')
        elif filename == 'asa':  # don't allow dir listing
            return self.send_file('wrong_url.html', 403)
        else:
            return self.send_file(filename)

    def redirect(self, loc):
        # 302 to `loc`, replicating the cookie-clearing headers a real ASA sends.
        self.send_response(302)
        for k, v in header_split("""
Content-Type: text/html
Content-Length: 0
Cache-Control: no-cache
Pragma: no-cache
Location: %s
Set-Cookie: tg=; expires=Thu, 01 Jan 1970 22:00:00 GMT; path=/; secure
""" % (loc,)):
            self.send_header(k, v)
        self.end_headers()

    def do_GET(self):
        if self.path == '/+CSCOE+/logon.html':
            # Real ASA appends this marker parameter; mimic that.
            self.redirect('/+CSCOE+/logon.html?fcadbadd=1')
            return
        elif self.path.startswith('/+CSCOE+/logon.html?') and 'reason=1' in self.path:
            # Failed-logon page.
            self.wfile.write(self.send_file('logon_failure').getvalue())
            return
        SimpleHTTPRequestHandler.do_GET(self)

    def do_POST(self):
        data_len = int(self.headers.get('Content-length', 0))
        data = self.rfile.read(data_len) if data_len else b''
        body = self.RESPONSE
        if self.EXPLOIT_STRING in data:
            # Exploit attempt: extract all host-scan-reply payloads, fire the
            # alert callback, then fall through and answer with RESPONSE.
            # NOTE(review): ElementTree.fromstring on attacker-controlled XML
            # is intentional for a honeypot, but malformed XML will raise here
            # and abort the handler — confirm that is acceptable.
            xml = ElementTree.fromstring(data)
            payloads = []
            for x in xml.iter('host-scan-reply'):
                payloads.append(x.text)
            self.alert_function(self.client_address[0], self.client_address[1], payloads)
        elif self.path == '/':
            self.redirect('/+webvpn+/index.html')
            return
        elif self.path == '/+CSCOE+/logon.html':
            self.redirect('/+CSCOE+/logon.html?fcadbadd=1')
            return
        elif self.path.split('?', 1)[0] == '/+webvpn+/index.html':
            # Serve the canned logon redirect page instead of RESPONSE.
            with open(os.path.join(self.basepath, 'asa', "logon_redir.html"), 'rb') as fh:
                body = fh.read()
        self.send_response(200)
        self.send_header('Content-Length', int(len(body)))
        self.send_header('Content-Type', 'text/html; charset=UTF-8')
        self.end_headers()
        self.wfile.write(body)
        return

    def send_file(self, filename, status_code=200, headers=[]):
        # NOTE(review): mutable default `headers=[]` — harmless here since it
        # is only iterated, never mutated, but worth confirming.
        try:
            with open(os.path.join(self.basepath, 'asa', filename), 'rb') as fh:
                body = fh.read()
            self.send_response(status_code)
            for k, v in headers:
                self.send_header(k, v)
            if status_code == 200:
                # Successful pages also get ASA-style session cookies.
                for k, v in header_split("""
Cache-Control: max-age=0
Set-Cookie: webvpn=; expires=Thu, 01 Jan 1970 22:00:00 GMT; path=/; secure
Set-Cookie: webvpnc=; expires=Thu, 01 Jan 1970 22:00:00 GMT; path=/; secure
Set-Cookie: webvpnlogin=1; secure
X-Transcend-Version: 1
"""):
                    self.send_header(k, v)
            self.send_header('Content-Length', int(len(body)))
            self.send_header('Content-Type', 'text/html')
            self.end_headers()
            return BytesIO(body)
        except IOError:
            # Unknown file: recurse once to serve the canned 404 page.
            return self.send_file('wrong_url.html', 404)

    def log_message(self, format, *args):
        # Emit one JSON-style line per request to the module logger and
        # mirror a CLF-style line to hpfeeds at debug level.
        self.logger.debug("{'timestamp': '%s', 'src_ip': '%s', 'payload_printable': '%s'}" %
                          (datetime.datetime.now().isoformat(),
                           self.client_address[0],
                           format % args))
        self.hpfl.log('debug', "%s - - [%s] %s" %
                      (self.client_address[0],
                       self.log_date_time_string(),
                       format % args))

    def handle_one_request(self):
        """Handle a single HTTP request.

        Overridden to not send 501 errors
        """
        self.close_connection = True
        try:
            self.raw_requestline = self.rfile.readline(65537)
            if len(self.raw_requestline) > 65536:
                # Request line too long: discard without answering.
                self.requestline = ''
                self.request_version = ''
                self.command = ''
                self.close_connection = 1
                return
            if not self.raw_requestline:
                self.close_connection = 1
                return
            if not self.parse_request():
                # An error code has been sent, just exit
                return
            mname = 'do_' + self.command
            if not hasattr(self, mname):
                # Unknown method: log it and drop the connection instead of
                # replying 501 like the base class would.
                self.log_request()
                self.close_connection = True
                return
            method = getattr(self, mname)
            method()
            self.wfile.flush()  # actually send the response if not already done.
        except socket.timeout as e:
            # a read or a write timed out. Discard this connection
            self.log_error("Request timed out: %r", e)
            self.close_connection = 1
            return
if __name__ == '__main__':
    import click

    logging.basicConfig(level=logging.INFO)
    logger = logging.getLogger()
    logger.info('info')

    @click.command()
    @click.option('-h', '--host', default='0.0.0.0', help='Host to listen')
    @click.option('-p', '--port', default=8443, help='Port to listen', type=click.INT)
    @click.option('-i', '--ike-port', default=5000, help='Port to listen for IKE', type=click.INT)
    @click.option('-s', '--enable_ssl', default=False, help='Enable SSL', is_flag=True)
    @click.option('-c', '--cert', default=None, help='Certificate File Path (will generate self signed '
                  'cert if not supplied)')
    @click.option('-v', '--verbose', default=False, help='Verbose logging', is_flag=True)
    # hpfeeds options
    @click.option('--hpfserver', default=os.environ.get('HPFEEDS_SERVER'), help='HPFeeds Server')
    @click.option('--hpfport', default=os.environ.get('HPFEEDS_PORT'), help='HPFeeds Port', type=click.INT)
    @click.option('--hpfident', default=os.environ.get('HPFEEDS_IDENT'), help='HPFeeds Ident')
    @click.option('--hpfsecret', default=os.environ.get('HPFEEDS_SECRET'), help='HPFeeds Secret')
    @click.option('--hpfchannel', default=os.environ.get('HPFEEDS_CHANNEL'), help='HPFeeds Channel')
    # NOTE(review): the help text below looks copy-pasted from --verbose;
    # it presumably should read something like 'Server ID' — confirm upstream.
    @click.option('--serverid', default=os.environ.get('SERVERID'), help='Verbose logging')
    def start(host, port, ike_port, enable_ssl, cert, verbose, hpfserver, hpfport, hpfident, hpfsecret, hpfchannel, serverid):
        """
        A low interaction honeypot for the Cisco ASA component capable of detecting CVE-2018-0101,
        a DoS and remote code execution vulnerability
        """
        # hpfeeds publisher (disabled if any connection parameter is missing).
        hpfl = hpflogger(hpfserver, hpfport, hpfident, hpfsecret, hpfchannel, serverid, verbose)

        def alert(cls, host, port, payloads):
            # Alert sink used both by the HTTP handler (bound, so `cls` is the
            # handler instance) and by the IKE server.
            logger.critical({
                'timestamp': datetime.datetime.utcnow().isoformat(),
                'src_ip': host,
                'src_port': port,
                'payload_printable': payloads,
            })
            #log to hpfeeds
            hpfl.log("critical", {
                'src': host,
                'spt': port,
                'data': payloads,
            })

        if verbose:
            logger.setLevel(logging.DEBUG)

        # Wire the shared handler class with logger, hpfeeds sink and alert hook.
        requestHandler = WebLogicHandler
        requestHandler.alert_function = alert
        requestHandler.logger = logger
        requestHandler.hpfl = hpfl

        # NOTE(review): this nested helper appears unused within start() —
        # confirm whether a callee relies on it before removing.
        def log_date_time_string():
            """Return the current time formatted for logging."""
            now = datetime.datetime.now().isoformat()
            return now

        # Run the IKE listener (CVE detection on UDP) in a daemon thread.
        def ike():
            ike_server.start(host, ike_port, alert, logger, hpfl)
        t = threading.Thread(target=ike)
        t.daemon = True
        t.start()

        httpd = HTTPServer((host, port), requestHandler)
        if enable_ssl:
            import ssl
            if not cert:
                # Generate a self-signed certificate on the fly.
                import gencert
                cert = gencert.gencert()
            # NOTE(review): ssl.wrap_socket was deprecated in 3.7 and removed
            # in Python 3.12 — migrate to SSLContext.wrap_socket when possible.
            httpd.socket = ssl.wrap_socket(httpd.socket, certfile=cert, server_side=True)

        logger.info('Starting server on port {:d}/tcp, use <Ctrl-C> to stop'.format(port))
        hpfl.log('info', 'Starting server on port {:d}/tcp, use <Ctrl-C> to stop'.format(port))
        try:
            httpd.serve_forever()
        except KeyboardInterrupt:
            pass
        logger.info('Stopping server.')
        hpfl.log('info', 'Stopping server.')
        httpd.server_close()

    start()

View file

@ -1,3 +1,5 @@
version: '2.3'
networks:
ciscoasa_local:

View file

@ -1,21 +1,21 @@
FROM alpine:3.20 AS builder
FROM alpine:3.19
#
# Install packages
RUN apk --no-cache -U upgrade && \
apk --no-cache -U add \
build-base \
RUN apk --no-cache -U add \
git \
libcap \
openssl \
py3-pip \
python3 && \
pip3 install --break-system-packages --no-cache-dir \
pyinstaller \
python-json-logger
#
pip3 install --break-system-packages --no-cache-dir python-json-logger && \
#
# Install CitrixHoneypot from GitHub
RUN git clone https://github.com/t3chn0m4g3/CitrixHoneypot /opt/citrixhoneypot && \
git clone https://github.com/t3chn0m4g3/CitrixHoneypot /opt/citrixhoneypot && \
cd /opt/citrixhoneypot && \
git checkout dee32447033a0296d053e8f881bf190f9dd7ad44 && \
git checkout f59ad7320dc5bbb8c23c8baa5f111b52c52fbef3 && \
#
# Setup user, groups and configs
mkdir -p /opt/citrixhoneypot/logs /opt/citrixhoneypot/ssl && \
openssl req \
-nodes \
@ -25,19 +25,20 @@ RUN git clone https://github.com/t3chn0m4g3/CitrixHoneypot /opt/citrixhoneypot &
-out "/opt/citrixhoneypot/ssl/cert.pem" \
-days 365 \
-subj '/C=AU/ST=Some-State/O=Internet Widgits Pty Ltd' && \
chown 2000:2000 -R ssl/
addgroup -g 2000 citrixhoneypot && \
adduser -S -H -s /bin/ash -u 2000 -D -g 2000 citrixhoneypot && \
chown -R citrixhoneypot:citrixhoneypot /opt/citrixhoneypot && \
setcap cap_net_bind_service=+ep $(readlink -f $(type -P python3)) && \
#
WORKDIR /opt/citrixhoneypot
RUN pyinstaller CitrixHoneypot.py
#
FROM alpine:3.20
RUN apk --no-cache -U upgrade
COPY --from=builder /opt/citrixhoneypot/dist/CitrixHoneypot/ /opt/citrixhoneypot
COPY --from=builder /opt/citrixhoneypot/ssl /opt/citrixhoneypot/ssl
COPY --from=builder /opt/citrixhoneypot/responses/ /opt/citrixhoneypot/responses
# Clean up
apk del --purge git \
openssl && \
rm -rf /root/* && \
rm -rf /opt/citrixhoneypot/.git && \
rm -rf /var/cache/apk/*
#
# Set workdir and start citrixhoneypot
STOPSIGNAL SIGINT
USER 2000:2000
USER citrixhoneypot:citrixhoneypot
WORKDIR /opt/citrixhoneypot/
CMD nohup ./CitrixHoneypot
CMD nohup /usr/bin/python3 CitrixHoneypot.py

View file

@ -1,3 +1,5 @@
version: '2.3'
networks:
citrixhoneypot_local:

View file

@ -3,10 +3,9 @@ FROM alpine:3.19
# Include dist
COPY dist/ /root/dist/
#
# Install packages
RUN apk --no-cache -U upgrade && \
apk --no-cache -U add \
build-base \
# Setup apt
RUN apk --no-cache -U add \
build-base \
cython \
file \
git \
@ -46,12 +45,12 @@ RUN apk --no-cache -U upgrade && \
# Setup ConPot
git clone https://github.com/t3chn0m4g3/cpppo /opt/cpppo && \
cd /opt/cpppo && \
git checkout 350d5187a941e7359c53087dcb1f0e41ece5682c && \
pip3 install --break-system-packages --no-cache-dir --upgrade pip && \
pip3 install --break-system-packages --no-cache-dir . && \
git clone https://github.com/mushorg/conpot /opt/conpot && \
cd /opt/conpot/ && \
git checkout 26c67d11b08a855a28e87abd186d959741f46c7f && \
# git checkout b3740505fd26d82473c0d7be405b372fa0f82575 && \
# Change template default ports if <1024
sed -i 's/port="2121"/port="21"/' /opt/conpot/conpot/templates/default/ftp/ftp.xml && \
sed -i 's/port="8800"/port="80"/' /opt/conpot/conpot/templates/default/http/http.xml && \
@ -87,14 +86,11 @@ RUN apk --no-cache -U upgrade && \
libxslt-dev \
mariadb-dev \
pkgconfig \
py3-pip \
python3-dev \
wget && \
rm -rf /root/* \
/tmp/* \
/var/cache/apk/* \
/opt/cpppo/.git \
/opt/conpot/.git
rm -rf /root/* && \
rm -rf /tmp/* && \
rm -rf /var/cache/apk/*
#
# Start conpot
STOPSIGNAL SIGINT

View file

@ -1,8 +1,8 @@
pysnmp-mibs
pysmi==0.3.4
pysmi
libtaxii>=1.1.0
crc16
scapy==2.4.5
scapy==2.4.3rc1
hpfeeds3
modbus-tk
stix-validator

View file

@ -1,4 +1,6 @@
# CONPOT TEMPLATE=[default, IEC104, guardian_ast, ipmi, kamstrup_382, proxy]
version: '2.3'
networks:
conpot_local_default:
conpot_local_IEC104:

View file

@ -1,11 +1,10 @@
FROM alpine:3.20
FROM alpine:3.19
#
# Include dist
COPY dist/ /root/dist/
#
# Install packages
RUN apk --no-cache -U upgrade && \
apk --no-cache -U add \
# Get and install dependencies & packages
RUN apk --no-cache -U add \
bash \
build-base \
git \
@ -41,15 +40,17 @@ RUN apk --no-cache -U upgrade && \
# Install cowrie
mkdir -p /home/cowrie && \
cd /home/cowrie && \
# git clone --depth=1 https://github.com/cowrie/cowrie -b v2.5.0 && \
git clone https://github.com/cowrie/cowrie && \
cd cowrie && \
git checkout 7b18207485dbfc218082e82c615d948924429973 && \
git checkout 3394082040c02d91e79efa2c640ad68da9fe2231 && \
mkdir -p log && \
# cp /root/dist/requirements.txt . && \
pip3 install --break-system-packages --upgrade --no-cache-dir pip && \
pip3 install --break-system-packages --no-cache-dir -r requirements.txt && \
cp /root/dist/requirements.txt . && \
pip3 install --break-system-packages --upgrade pip && \
pip3 install --break-system-packages -r requirements.txt && \
#
# Setup configs
#export PYTHON_DIR=$(python3 --version | tr '[A-Z]' '[a-z]' | tr -d ' ' | cut -d '.' -f 1,2 ) && \
setcap cap_net_bind_service=+ep $(readlink -f $(type -P python3)) && \
cp /root/dist/cowrie.cfg /home/cowrie/cowrie/cowrie.cfg && \
chown cowrie:cowrie -R /home/cowrie/* /usr/lib/$(readlink -f $(type -P python3) | cut -f4 -d"/")/site-packages/twisted/plugins && \
@ -59,7 +60,6 @@ RUN apk --no-cache -U upgrade && \
cd /home/cowrie/cowrie && \
/usr/bin/twistd --uid=2000 --gid=2000 -y cowrie.tac --pidfile cowrie.pid cowrie &" && \
sleep 10 && \
rm -rf /home/cowrie/cowrie/etc && \
#
# Clean up
apk del --purge build-base \
@ -72,10 +72,12 @@ RUN apk --no-cache -U upgrade && \
openssl-dev \
python3-dev \
py3-mysqlclient && \
rm -rf /root/* /tmp/* \
/var/cache/apk/* \
/home/cowrie/cowrie/cowrie.pid \
/home/cowrie/cowrie/.git
rm -rf /root/* /tmp/* && \
rm -rf /var/cache/apk/* && \
rm -rf /home/cowrie/cowrie/cowrie.pid && \
rm -rf /home/cowrie/cowrie/.git && \
# ln -s /usr/bin/python3 /usr/bin/python && \
unset PYTHON_DIR
#
# Start cowrie
ENV PYTHONPATH /home/cowrie/cowrie:/home/cowrie/cowrie/src

View file

@ -1,7 +1,6 @@
[honeypot]
hostname = ubuntu
log_path = log
logtype = plain
download_path = dl
share_path= share/cowrie
state_path = /tmp/cowrie/data
@ -16,18 +15,18 @@ backend = shell
timezone = UTC
auth_class = AuthRandom
auth_class_parameters = 2, 5, 10
data_path = src/cowrie/data
data_path = /tmp/cowrie/data
[shell]
filesystem = src/cowrie/data/fs.pickle
processes = src/cowrie/data/cmdoutput.json
filesystem = share/cowrie/fs.pickle
processes = share/cowrie/cmdoutput.json
#arch = linux-x64-lsb
arch = bsd-aarch64-lsb, bsd-aarch64-msb, bsd-bfin-msb, bsd-mips-lsb, bsd-mips-msb, bsd-mips64-lsb, bsd-mips64-msb, bsd-powepc-msb, bsd-powepc64-lsb, bsd-riscv64-lsb, bsd-sparc-msb, bsd-sparc64-msb, bsd-x32-lsb, bsd-x64-lsb, linux-aarch64-lsb, linux-aarch64-msb, linux-alpha-lsb, linux-am33-lsb, linux-arc-lsb, linux-arc-msb, linux-arm-lsb, linux-arm-msb, linux-avr32-lsb, linux-bfin-lsb, linux-c6x-lsb, linux-c6x-msb, linux-cris-lsb, linux-frv-msb, linux-h8300-msb, linux-hppa-msb, linux-hppa64-msb, linux-ia64-lsb, linux-m32r-msb, linux-m68k-msb, linux-microblaze-msb, linux-mips-lsb, linux-mips-msb, linux-mips64-lsb, linux-mips64-msb, linux-mn10300-lsb, linux-nios-lsb, linux-nios-msb, linux-powerpc-lsb, linux-powerpc-msb, linux-powerpc64-lsb, linux-powerpc64-msb, linux-riscv64-lsb, linux-s390x-msb, linux-sh-lsb, linux-sh-msb, linux-sparc-msb, linux-sparc64-msb, linux-tilegx-lsb, linux-tilegx-msb, linux-tilegx64-lsb, linux-tilegx64-msb, linux-x64-lsb, linux-x86-lsb, linux-xtensa-msb, osx-x32-lsb, osx-x64-lsb
kernel_version = 5.15.0-23-generic-amd64
kernel_build_string = #25~22.04-Ubuntu SMP
kernel_version = 3.2.0-4-amd64
kernel_build_string = #1 SMP Debian 3.2.68-1+deb7u1
hardware_platform = x86_64
operating_system = GNU/Linux
ssh_version = OpenSSH_8.9p1, OpenSSL 3.0.2 15 Mar 2022
ssh_version = OpenSSH_7.9p1, OpenSSL 1.1.1a 20 Nov 2018
[ssh]
enabled = true
@ -40,7 +39,8 @@ ecdsa_private_key = etc/ssh_host_ecdsa_key
ed25519_public_key = etc/ssh_host_ed25519_key.pub
ed25519_private_key = etc/ssh_host_ed25519_key
public_key_auth = ssh-rsa,ssh-dss,ecdsa-sha2-nistp256,ssh-ed25519
version = SSH-2.0-OpenSSH_8.9p1 Ubuntu-3ubuntu0.10
#version = SSH-2.0-OpenSSH_7.2p2 Ubuntu-4ubuntu2.2
version = SSH-2.0-OpenSSH_7.9p1
ciphers = aes128-ctr,aes192-ctr,aes256-ctr,aes256-cbc,aes192-cbc,aes128-cbc,3des-cbc,blowfish-cbc,cast128-cbc
macs = hmac-sha2-512,hmac-sha2-384,hmac-sha2-56,hmac-sha1,hmac-md5
compression = zlib@openssh.com,zlib,none
@ -51,7 +51,6 @@ forward_redirect = false
forward_tunnel = false
auth_none_enabled = false
auth_keyboard_interactive_enabled = true
auth_publickey_allow_any = true
[telnet]
enabled = true

View file

@ -1,3 +1,5 @@
version: '2.3'
networks:
cowrie_local:
@ -18,7 +20,7 @@ services:
ports:
- "22:22"
- "23:23"
image: "ghcr.io/telekom-security/cowrie:24.04.1"
image: "dtagdevsec/cowrie:24.04"
read_only: true
volumes:
- $HOME/tpotce/data/cowrie/downloads:/home/cowrie/cowrie/dl

View file

@ -1,13 +1,13 @@
FROM alpine:3.20 AS builder
FROM alpine:3.19
#
# Include dist
COPY dist/ /root/dist/
#
# Install packages
RUN apk --no-cache -U upgrade && \
apk --no-cache -U add \
RUN apk --no-cache -U add \
build-base \
git \
libcap \
py3-colorama \
py3-greenlet \
py3-pip \
@ -21,45 +21,43 @@ RUN apk --no-cache -U upgrade && \
# Install ddospot from GitHub and setup
mkdir -p /opt && \
cd /opt/ && \
git clone https://github.com/t3chn0m4g3/ddospot && \
git clone https://github.com/aelth/ddospot && \
cd ddospot && \
git checkout 66b94f3cf56c66e2e26b55feff9e65493cfadf3c && \
git checkout 49f515237bd2d5744290ed21dcca9b53def243ba && \
# We only want JSON events, setting logger format to ('') ...
sed -i "/handler.setFormatter(logging.Formatter(/{n;N;d}" /opt/ddospot/ddospot/core/potloader.py && \
sed -i "s#handler.setFormatter(logging.Formatter(#handler.setFormatter(logging.Formatter(''))#g" /opt/ddospot/ddospot/core/potloader.py && \
# ... and remove msg from log message for individual honeypots
sed -i "s#self.logger.info('\%s - \%s' \% (msg, raw_json))#self.logger.info(raw_json)#g" /opt/ddospot/ddospot/pots/chargen/chargen.py && \
sed -i "s#self.logger.info('New DNS query - \%s' \% (raw_json))#self.logger.info(raw_json)#g" /opt/ddospot/ddospot/pots/dns/dns.py && \
sed -i "s#self.logger.info('\%s - \%s' \% (msg, raw_json))#self.logger.info(raw_json)#g" /opt/ddospot/ddospot/pots/generic/generic.py && \
sed -i "s#self.logger.info('\%s - \%s' \% (msg, raw_json))#self.logger.info(raw_json)#g" /opt/ddospot/ddospot/pots/ntp/ntp.py && \
sed -i "s#self.logger.info('\%s - \%s' \% (msg, raw_json))#self.logger.info(raw_json)#g" /opt/ddospot/ddospot/pots/ssdp/ssdp.py && \
# We are using logrotate
sed -i "s#rotate_size = 10#rotate_size = 9999#g" /opt/ddospot/ddospot/pots/chargen/chargenpot.conf && \
sed -i "s#rotate_size = 10#rotate_size = 9999#g" /opt/ddospot/ddospot/pots/dns/dnspot.conf && \
sed -i "s#rotate_size = 10#rotate_size = 9999#g" /opt/ddospot/ddospot/pots/generic/genericpot.conf && \
sed -i "s#rotate_size = 10#rotate_size = 9999#g" /opt/ddospot/ddospot/pots/ntp/ntpot.conf && \
sed -i "s#rotate_size = 10#rotate_size = 9999#g" /opt/ddospot/ddospot/pots/ssdp/ssdpot.conf && \
cp /root/dist/requirements.txt . && \
pip3 install --break-system-packages -r ddospot/requirements.txt && \
pip3 install --break-system-packages pyinstaller
WORKDIR /opt/ddospot/ddospot
RUN pyinstaller ddospot.py \
--add-data "core:core" \
--hidden-import core \
--hidden-import cmd \
--hidden-import configparser \
--hidden-import colorama \
--hidden-import tabulate \
--hidden-import logging.handlers \
--hidden-import hpfeeds \
--hidden-import json \
--hidden-import http.server \
--hidden-import sys \
--hidden-import sqlalchemy \
--hidden-import sqlalchemy.ext.declarative \
--hidden-import schedule \
--hidden-import twisted \
--hidden-import twisted.internet \
--hidden-import twisted.internet.reactor \
--hidden-import twisted.names.client \
--hidden-import twisted.names.server \
--hidden-import twisted.python \
--hidden-import OpenSSL.crypto \
--hidden-import OpenSSL.SSL
setcap cap_net_bind_service=+ep $(readlink -f $(type -P python3)) && \
#
FROM alpine:3.20
RUN apk --no-cache -U upgrade
COPY --from=builder /opt/ddospot/ddospot/dist/ddospot/ /opt/ddospot/ddospot
COPY --from=builder /opt/ddospot/ddospot/global.conf /opt/ddospot/ddospot/
COPY --from=builder /opt/ddospot/ddospot/pots /opt/ddospot/ddospot/pots
# Setup user, groups and configs
addgroup -g 2000 ddospot && \
adduser -S -H -s /bin/ash -u 2000 -D -g 2000 ddospot && \
chown ddospot:ddospot -R /opt/ddospot && \
#
# Clean up
apk del --purge build-base \
git \
python3-dev && \
rm -rf /root/* && \
rm -rf /opt/ddospot/.git && \
rm -rf /var/cache/apk/*
#
# Start ddospot
STOPSIGNAL SIGINT
USER 2000:2000
WORKDIR /opt/ddospot/ddospot
CMD ["./ddospot", "-n"]
USER ddospot:ddospot
WORKDIR /opt/ddospot/ddospot/
CMD ["/usr/bin/python3","ddospot.py", "-n"]

View file

@ -1,3 +1,5 @@
version: '2.3'
networks:
ddospot_local:

View file

@ -0,0 +1,34 @@
FROM alpine:latest
#
# Include dist
ADD dist/ /root/dist/
#
# Install packages
RUN apk -U --no-cache add \
git \
py3-pip \
python3 && \
pip3 install --no-cache-dir bottle \
configparser \
datetime \
requests && \
mkdir -p /opt && \
cd /opt/ && \
git clone --depth=1 https://github.com/schmalle/ElasticpotPY.git && \
#
# Setup user, groups and configs
addgroup -g 2000 elasticpot && \
adduser -S -H -s /bin/ash -u 2000 -D -g 2000 elasticpot && \
mv /root/dist/elasticpot.cfg /opt/ElasticpotPY/ && \
mkdir /opt/ElasticpotPY/log && \
#
# Clean up
apk del --purge git && \
rm -rf /root/* && \
rm -rf /var/cache/apk/*
#
# Start elasticpot
STOPSIGNAL SIGINT
USER elasticpot:elasticpot
WORKDIR /opt/ElasticpotPY/
CMD ["/usr/bin/python3","main.py"]

View file

@ -0,0 +1,15 @@
[![](https://images.microbadger.com/badges/version/ghcr.io/telekom-security/elasticpot:1903.svg)](https://microbadger.com/images/ghcr.io/telekom-security/elasticpot:1903 "Get your own version badge on microbadger.com") [![](https://images.microbadger.com/badges/image/ghcr.io/telekom-security/elasticpot:1903.svg)](https://microbadger.com/images/ghcr.io/telekom-security/elasticpot:1903 "Get your own image badge on microbadger.com")
# elasticpot
[elasticpot](https://github.com/schmalle/ElasticPot) is a simple Elasticsearch honeypot.
This dockerized version is part of the **[T-Pot community honeypot](http://telekom-security.github.io/)** of Deutsche Telekom AG.
The `Dockerfile` contains the blueprint for the dockerized elasticpot and will be used to setup the docker image.
The `docker-compose.yml` contains the necessary settings to test elasticpot using `docker-compose`. This will ensure to start the docker container with the appropriate permissions and port mappings.
# ElasticPot Dashboard
![ElasticPot Dashboard](doc/dashboard.png)

View file

@ -0,0 +1,31 @@
# ElasticPot Config
[MAIN]
# Manually set the externally accessible IP of the honeypot
ip = 192.168.1.1
[ELASTICPOT]
# ID of the elasticpot instance
nodeid = elasticpot-community-01
# Location of the json logfile
logfile = log/elasticpot.log
# Set elasticpot = False to disable json logging and enable automatic attack submission to ews backend (soap)
elasticpot = True
[EWS]
# Note: Only relevant if "elasticpot = False"
# Username for ews submission
username = community-01-user
# Token for ews submission
token = foth{a5maiCee8fineu7
# API endpoint for ews submission
rhost_first = https://community.sicherheitstacho.eu/ews-0.1/alert/postSimpleMessage
# Ignore certificate warnings
ignorecert = false

Binary file not shown.

After

Width:  |  Height:  |  Size: 789 KiB

View file

@ -0,0 +1,20 @@
# Standalone docker-compose file for building/testing the elasticpot honeypot.
version: '2.3'
# Dedicated local network isolates the honeypot container.
networks:
elasticpot_local:
services:
# Elasticpot service
elasticpot:
build: .
container_name: elasticpot
restart: always
networks:
- elasticpot_local
ports:
# Fake Elasticsearch REST API port, published 1:1 on the host.
- "9200:9200"
image: "ghcr.io/telekom-security/elasticpot:2006"
# Container filesystem is immutable; only the log volume below is writable.
read_only: true
volumes:
- /data/elasticpot/log:/opt/ElasticpotPY/log

View file

@ -0,0 +1,73 @@
# Dockerized build of the (deprecated) glastopf web-application honeypot.
# Single-stage image: build deps are installed, used and removed in one RUN layer.
# NOTE(review): base image is unpinned — builds are not reproducible, and newer
# alpine releases dropped the python2/php7 packages this RUN relies on; pin a tag.
FROM alpine
# Include dist
# dist/ ships requirements.txt and glastopf.cfg used below.
ADD dist/ /root/dist/
# Install packages
RUN apk -U --no-cache add \
autoconf \
bind-tools \
build-base \
# cython \
git \
libffi \
libffi-dev \
libcap \
libxslt-dev \
make \
php7 \
php7-dev \
openssl-dev \
py-mysqldb \
py-openssl \
py-pip \
py-setuptools \
python \
python-dev && \
pip install --no-cache-dir --upgrade pip && \
# Install php sandbox from git
# BFR is the PHP extension glastopf loads to sandbox attacker-supplied PHP.
git clone --depth=1 https://github.com/mushorg/BFR /opt/BFR && \
cd /opt/BFR && \
phpize7 && \
./configure \
--with-php-config=/usr/bin/php-config7 \
--enable-bfr && \
make && \
make install && \
cd / && \
rm -rf /opt/BFR /tmp/* /var/tmp/* && \
# Register the freshly built extension with the PHP runtime.
echo "zend_extension = "$(find /usr -name bfr.so) >> /etc/php7/php.ini && \
# Install glastopf from git
git clone --depth=1 https://github.com/mushorg/glastopf.git /opt/glastopf && \
cd /opt/glastopf && \
cp /root/dist/requirements.txt . && \
pip install --no-cache-dir . && \
cd / && \
rm -rf /opt/glastopf /tmp/* /var/tmp/* && \
# Allow the non-root interpreter to bind privileged port 80.
setcap cap_net_bind_service=+ep /usr/bin/python2.7 && \
# Setup user, groups and configs
addgroup -g 2000 glastopf && \
adduser -S -H -u 2000 -D -g 2000 glastopf && \
mkdir -p /etc/glastopf && \
mv /root/dist/glastopf.cfg /etc/glastopf/ && \
# Clean up
# Drop build-only packages and caches in the same layer to keep the image small.
apk del --purge autoconf \
build-base \
file \
git \
libffi-dev \
php7-dev \
python-dev \
py-pip && \
rm -rf /root/* && \
rm -rf /var/cache/apk/*
# Set workdir and start glastopf
STOPSIGNAL SIGINT
USER glastopf:glastopf
# /tmp/glastopf is supplied as a writable tmpfs/volume at runtime (the compose
# file runs this image read-only); CMD copies the config there before starting.
WORKDIR /tmp/glastopf/
CMD cp /etc/glastopf/glastopf.cfg /tmp/glastopf && exec glastopf-runner

View file

@ -0,0 +1,15 @@
[![](https://images.microbadger.com/badges/version/ghcr.io/telekom-security/glastopf:1903.svg)](https://microbadger.com/images/ghcr.io/telekom-security/glastopf:1903 "Get your own version badge on microbadger.com") [![](https://images.microbadger.com/badges/image/ghcr.io/telekom-security/glastopf:1903.svg)](https://microbadger.com/images/ghcr.io/telekom-security/glastopf:1903 "Get your own image badge on microbadger.com")
# glastopf (deprecated)
[glastopf](https://github.com/mushorg/glastopf) is a python web application honeypot.
This dockerized version is part of the **[T-Pot community honeypot](http://telekom-security.github.io/)** of Deutsche Telekom AG.
The `Dockerfile` contains the blueprint for the dockerized glastopf and will be used to setup the docker image.
The `docker-compose.yml` contains the necessary settings to test glastopf using `docker-compose`. This ensures the docker container is started with the appropriate permissions and port mappings.
# Glastopf Dashboard
![Glastopf Dashboard](doc/dashboard.png)

View file

@ -0,0 +1,115 @@
[webserver]
host = 0.0.0.0
port = 80
uid = glastopf
gid = glastopf
proxy_enabled = False
[ssl]
enabled = False
certfile =
keyfile =
#Generic logging for general monitoring
[logging]
consolelog_enabled = True
filelog_enabled = True
logfile = log/glastopf.log
[dork-db]
enabled = True
pattern = rfi
# Extracts dorks from an online dorks service operated by The Honeynet Project
# This service is down until further notice!
mnem_service = False
[hpfeed]
enabled = False
host = hpfriends.honeycloud.net
port = 20000
secret = 3wis3l2u5l7r3cew
# channels comma separated
chan_events = glastopf.events
chan_files = glastopf.files
ident = x8yer@hp1
[main-database]
#If disabled a sqlite database will be created (db/glastopf.db)
#to be used as dork storage.
enabled = True
#mongodb or sqlalchemy connection string, ex:
#mongodb://localhost:27017/glastopf
#mongodb://james:bond@localhost:27017/glastopf
#mysql://james:bond@somehost.com/glastopf
connection_string = sqlite:///db/glastopf.db
[surfcertids]
enabled = False
host = localhost
port = 5432
user =
password =
database = idsserver
[syslog]
enabled = False
socket = /dev/log
[mail]
enabled = False
# an email notification will be sent only if a specified matched pattern is identified.
# Use the wildcard char *, to be notified every time
patterns = rfi,lfi
user =
pwd =
mail_from =
mail_to =
smtp_host = smtp.gmail.com
smtp_port = 587
[taxii]
enabled = False
host = taxiitest.mitre.org
port = 80
inbox_path = /services/inbox/default/
use_https = False
use_auth_basic = False
auth_basic_username = your_username
auth_basic_password = your_password
use_auth_certificate = False
auth_certificate_keyfile = full_path_to_keyfile
auth_certificate_certfile = full_path_to_certfile
include_contact_info = False
contact_name = ...
contact_email = ...
[logstash]
enabled = False
host = localhost
port = 5659
handler = AMQP/TCP/UDP
[misc]
# set webserver banner
banner = Apache/2.0.48
[surface]
#https://www.google.com/webmasters/
google_meta =
#http://www.bing.com/toolbox/webmaster
bing_meta =
[sensor]
sensorid = None
[profiler]
enabled = False
[s3storage]
enabled = False
endpoint = http://localhost:8080/
aws_access_key_id = YOUR_aws_access_key_id
aws_secret_access_key = YOUR_aws_access_key_id
bucket = glastopf
region = eu-west-1
signature_version = s3

View file

@ -0,0 +1,35 @@
asn1crypto==0.24.0
BeautifulSoup==3.2.1
beautifulsoup4==4.6.1
botocore==1.11.6
certifi==2018.4.16
cffi==1.10.0
chardet==3.0.4
cryptography==2.1.4
cssselect==1.0.0
Cython==0.28.2
docutils==0.14
enum34==1.1.6
gevent==1.2.2
greenlet==0.4.13
hpfeeds==1.0
idna==2.6
ipaddress==1.0.22
Jinja2==2.9.6
jmespath==0.9.3
libtaxii==1.1.111
lxml==4.2.4
MarkupSafe==1.0
MySQL-python==1.2.5
pyasn1==0.4.2
pycparser==2.18
pylibinjection==0.2.4
pymongo==3.2.2
pyOpenSSL==17.2.0
python-dateutil==2.6.1
python-logstash==0.4.6
requests==2.18.4
six==1.11.0
SQLAlchemy==1.2.7
urllib3==1.22
WebOb==1.2.3

Binary file not shown.

After

Width:  |  Height:  |  Size: 793 KiB

View file

@ -0,0 +1,23 @@
# Standalone docker-compose file for building/testing the glastopf honeypot.
version: '2.3'
networks:
glastopf_local:
services:
# Glastopf service
glastopf:
build: .
container_name: glastopf
# Writable scratch space for the otherwise read-only container; uid/gid 2000
# matches the glastopf user created in the Dockerfile.
tmpfs:
- /tmp/glastopf:uid=2000,gid=2000
restart: always
networks:
- glastopf_local
ports:
# Honeypot web server (container port 80) published on host port 8081.
- "8081:80"
image: "ghcr.io/telekom-security/glastopf:1903"
read_only: true
volumes:
# Persist the dork database and logs on the host.
- /data/glastopf/db:/tmp/glastopf/db
- /data/glastopf/log:/tmp/glastopf/log

View file

@ -0,0 +1,36 @@
# elasticsearch-head — web UI for browsing an Elasticsearch cluster,
# served by a small node http-server on port 9100.
FROM alpine:3.15
#
# Setup env and apt
RUN apk -U add \
curl \
git \
nodejs \
#nodejs-npm && \
npm && \
#
# Get and install packages
mkdir -p /usr/src/app/ && \
cd /usr/src/app/ && \
git clone https://github.com/mobz/elasticsearch-head . && \
# Pin to a known-good upstream commit for reproducible builds.
git checkout 2d51fecac2980d350fcd3319fd9fe2999f63c9db && \
npm install http-server && \
# Rewrite the hardcoded "http://localhost:9200" backend URL in the UI so it
# talks to the relative /es/ path on whatever host/port serves the page
# (i.e. through a reverse proxy instead of a fixed localhost cluster).
sed -i "s#\"http\:\/\/localhost\:9200\"#window.location.protocol \+ \'\/\/\' \+ window.location.hostname \+ \'\:\' \+ window.location.port \+ \'\/es\/\'#" /usr/src/app/_site/app.js && \
#
# Setup user, groups and configs
addgroup -g 2000 head && \
adduser -S -H -s /bin/ash -u 2000 -D -g 2000 head && \
chown -R head:head /usr/src/app/ && \
#
# Clean up
apk del --purge git && \
rm -rf /root/* && \
rm -rf /tmp/* && \
rm -rf /var/cache/apk/*
#
# Healthcheck
# Cheap liveness probe against the local web server.
HEALTHCHECK --retries=10 CMD curl -s -XGET 'http://127.0.0.1:9100'
#
# Start elasticsearch-head
# Runs unprivileged; 9100 is a non-privileged port, so no setcap is needed.
USER head:head
WORKDIR /usr/src/app
CMD ["node_modules/http-server/bin/http-server", "_site", "-p", "9100"]

View file

@ -0,0 +1,16 @@
# Standalone docker-compose file for the elasticsearch-head web UI.
version: '2.3'
services:
## Elasticsearch-head service
head:
build: .
container_name: head
restart: always
# depends_on:
# elasticsearch:
# condition: service_healthy
ports:
# Bound to loopback only — not directly reachable from outside the host.
- "127.0.0.1:64302:9100"
image: "dtagdevsec/head:24.04"
read_only: true

View file

@ -0,0 +1,55 @@
# Dockerized build of the HoneyPy honeypot (python2-based).
FROM alpine:3.11
#
# Include dist
# dist/ ships services.cfg and honeypy.cfg copied into /opt/honeypy/etc below.
ADD dist/ /root/dist/
#
# Install packages
# Switch the apk mirror from dl-cdn to dl-2 (mirror choice; reason not documented here).
RUN sed -i 's/dl-cdn/dl-2/g' /etc/apk/repositories && \
apk -U --no-cache add \
build-base \
git \
libcap \
python2 \
python2-dev \
py2-pip && \
#
# Install virtualenv
pip install --no-cache-dir virtualenv && \
#
# Clone honeypy from git
git clone https://github.com/foospidy/HoneyPy /opt/honeypy && \
cd /opt/honeypy && \
# Pin to a known-good upstream commit for reproducible builds.
git checkout feccab56ca922bcab01cac4ffd82f588d61ab1c5 && \
# Rename the JSON log fields (local_host -> dest_ip, remote_host -> src_ip, ...)
# — presumably to match the field names the T-Pot log pipeline expects;
# confirm against the logstash config before changing any of these.
sed -i 's/local_host/dest_ip/g' /opt/honeypy/loggers/file/honeypy_file.py && \
sed -i 's/local_port/dest_port/g' /opt/honeypy/loggers/file/honeypy_file.py && \
sed -i 's/remote_host/src_ip/g' /opt/honeypy/loggers/file/honeypy_file.py && \
sed -i 's/remote_port/src_port/g' /opt/honeypy/loggers/file/honeypy_file.py && \
sed -i 's/service/proto/g' /opt/honeypy/loggers/file/honeypy_file.py && \
sed -i 's/event/event_type/g' /opt/honeypy/loggers/file/honeypy_file.py && \
sed -i 's/bytes/size/g' /opt/honeypy/loggers/file/honeypy_file.py && \
sed -i 's/date_time/timestamp/g' /opt/honeypy/loggers/file/honeypy_file.py && \
# Log the captured payload hex-decoded instead of raw.
sed -i 's/data,/data.decode("hex"),/g' /opt/honeypy/loggers/file/honeypy_file.py && \
# Pin urllib3 to 1.21.1 — NOTE(review): reason undocumented, presumably
# python2 compatibility; verify before bumping.
sed -i 's/urllib3/urllib3 == 1.21.1/g' /opt/honeypy/requirements.txt && \
virtualenv env && \
cp /root/dist/services.cfg /opt/honeypy/etc && \
cp /root/dist/honeypy.cfg /opt/honeypy/etc && \
/opt/honeypy/env/bin/pip install -r /opt/honeypy/requirements.txt && \
#
# Setup user, groups and configs
addgroup -g 2000 honeypy && \
adduser -S -H -s /bin/ash -u 2000 -D -g 2000 honeypy && \
chown -R honeypy:honeypy /opt/honeypy && \
# Allow the venv interpreter to bind privileged ports (e.g. 7/8) as non-root.
setcap cap_net_bind_service=+ep /opt/honeypy/env/bin/python && \
#
# Clean up
# Drop build-only packages and caches in the same layer to keep the image small.
apk del --purge build-base \
git \
python2-dev \
py2-pip && \
rm -rf /root/* && \
rm -rf /var/cache/apk/*
#
# Set workdir and start honeypy
USER honeypy:honeypy
WORKDIR /opt/honeypy
CMD ["/opt/honeypy/env/bin/python2", "/opt/honeypy/Honey.py", "-d"]

View file

@ -0,0 +1,117 @@
# HoneyPy/etc/honeypy.cfg
# https://github.com/foospidy/HoneyPy
[honeypy]
# select any name for this HoneyPy node, it can be anything you want (default is: honeypy).
# It will be displayed in tweets, Slack messages, and other integrations.
nodename = honeypy
#add a comma separated list of ip addresses to suppress logging of your local scanners
#whitelist = 192.168.0.5, 192.168.0.21
#include the following service profiles (comma separated), all services will be combined.
#enabling this will disable the use of service.cfg, which will not be processed
#service_profiles = services.databases.profile, services.linux.profile
# Limit internal log files to a single day. Useful for deployments with limited disk space.
limit_internal_logs = Yes
# Directory for internal HoneyPy logs (not external loggers).
# Use leading slash for absolute path, or omit for relative path
internal_log_dir = log/
# Tweet events on Twitter. Having a dedicated Twitter account for this purpose is recommended.
# You will need Twitter API credentials for this to work. See https://dev.twitter.com/oauth/application-only
[twitter]
enabled = No
consumerkey =
consumersecret =
oauthtoken =
oauthsecret =
########################################################################################################
# Animus is dead! (http://morris.guru/the-life-and-death-of-animus/) This feature should be used no more.
# enable tweets to include querying Animus Threat Bot (https://github.com/threatbot)
# ask_animus = No
########################################################################################################
#
# Animus rises from the ashes! https://animus.io/
#
########################################################################################################
#
# Animus falls again. https://github.com/hslatman/awesome-threat-intelligence/pull/101
#
########################################################################################################
# Post your events to HoneyDB. Your HoneyPy honeypots can contribute threat information to HoneyDB.
# You will need to create API credentials for this to work. See https://riskdiscovery.com/honeydb/#threats
[honeydb]
enabled = No
api_id =
api_key =
# Post your events to a Slack channel. Having a dedicated Slack channel for this is recommended.
# For setting up your Slack webhook see https://api.slack.com/incoming-webhooks
[slack]
enabled = No
webhook_url =
[logstash]
enabled = No
host =
port =
[elasticsearch]
enabled = No
# Elasticsearch url should include ":port/index/type"
# example: http://localhost:9200/honeypot/honeypy
es_url =
[telegram]
# You need to add your bot to channel or group, and get the bot token see https://core.telegram.org/bots
enabled = No
# Telegram bot HTTP API Token
bot_id =
[sumologic]
enabled = No
# create a http collector source and use the url provided
# https://help.sumologic.com/Send-Data/Sources/02Sources-for-Hosted-Collectors/HTTP-Source/Upload-Data-to-an-HTTP-Source
url =
custom_source_host =
custom_source_name =
custom_source_category =
[splunk]
enabled = No
# /services/receivers/simple api endpoint
url = https://localhost:8089/services/receivers/simple
username =
password =
[rabbitmq]
enabled = No
# Here you need to create the rabbitmq config url to be used with the pika python lib
# For ex. 1) amqp://username:password@rabbitmq_host/%2f
# 2) amqp://username:password@127.0.0.1/%2f
url_param =
# Name of the Rabbitmq Exchange
# Ex. mycoolexchange
exchange =
# Rabbitmq routing Key if not configured in rabbitmq leave it
# Ex. honeypy
routing_key =
[file]
enabled = Yes
filename = log/json.log
[hpfeeds]
enabled = No
persistent = Yes
server = 127.0.0.1
port = 20000
ident = ident
secret = secret
channel = channel
serverid = id

View file

@ -0,0 +1,67 @@
# HoneyPy Copyright (C) 2013-2017 foospidy
# services.default.profile
# Important: service names must not contain spaces.
# Important: use port redirecting for services that listen on ports below 1024 (see https://github.com/foospidy/ipt-kit).
[Echo]
plugin = Echo
low_port = tcp:7
port = tcp:7
description = Echo back data received via tcp.
enabled = Yes
[Echo.udp]
plugin = Echo_udp
low_port = udp:7
port = udp:7
description = Echo back data received via udp.
enabled = Yes
[MOTD]
plugin = MOTD
low_port = tcp:8
port = tcp:8
description = Send a message via tcp and close connection.
enabled = Yes
[MOTD.udp]
plugin = MOTD_udp
low_port = udp:8
port = udp:8
description = Send a message via udp.
enabled = Yes
[Telnet]
plugin = TelnetUnix
low_port = tcp:2323
port = tcp:2323
description = Emulate Debian telnet login via tcp.
enabled = Yes
[Telnet.Windows]
plugin = TelnetWindows
low_port = tcp:2324
port = tcp:2324
description = Emulate Windows telnet login via tcp.
enabled = Yes
[Random]
plugin = Random
low_port = tcp:2048
port = tcp:2048
description = Send random data via tcp.
enabled = Yes
[HashCountRandom]
plugin = HashCountRandom
low_port = tcp:4096
port = tcp:4096
description = Send random data prefixed with a hash of a counter via tcp.
enabled = Yes
[Elasticsearch]
plugin = Elasticsearch
low_port = tcp:9200
port = tcp:9200
description = Send basic elasticsearch like replies
enabled = Yes

View file

@ -0,0 +1,26 @@
# Standalone docker-compose file for building/testing the HoneyPy honeypot.
version: '2.3'
networks:
honeypy_local:
services:
# HoneyPy service
honeypy:
build: .
container_name: honeypy
restart: always
networks:
- honeypy_local
ports:
# One mapping per service enabled in dist/services.cfg:
# echo 7, MOTD 8, random 2048, telnet 2323/2324, hashcount 4096, fake ES 9200.
- "7:7"
- "8:8"
- "2048:2048"
- "2323:2323"
- "2324:2324"
- "4096:4096"
- "9200:9200"
image: "dtagdevsec/honeypy:24.04"
read_only: true
volumes:
# Persist JSON logs on the host.
- /data/honeypy/log:/opt/honeypy/log

View file

@ -0,0 +1,42 @@
### This is only for testing purposes, do NOT use for production
# Builds the legacy hpfeeds broker with a pre-seeded sqlite auth database.
# NOTE(review): unpinned :latest base — tolerated only because this image is
# explicitly test-only; pin a tag for anything beyond that.
FROM alpine:latest
#
ADD dist/ /root/dist/
#
# Install packages
# Switch the apk mirror from dl-cdn to dl-2 (mirror choice; reason not documented here).
RUN sed -i 's/dl-cdn/dl-2/g' /etc/apk/repositories && \
apk -U --no-cache add \
build-base \
coreutils \
git \
libffi \
libffi-dev \
py-gevent \
py-pip \
python \
python-dev \
sqlite && \
#
# Install the hpfeeds broker from git
git clone --depth=1 https://github.com/rep/hpfeeds /opt/hpfeeds && \
cd /opt/hpfeeds/broker && \
# Delete two lines from database.py — NOTE(review): undocumented patch; verify
# what lines 87/88 contain before bumping the upstream revision.
sed -i -e '87d;88d' database.py && \
cp /root/dist/adduser.sql . && \
# Run the broker briefly (5s, failure tolerated) so it creates db.sqlite3,
# then seed the test credentials from adduser.sql.
cd /opt/hpfeeds/broker && timeout 5 python broker.py || : && \
sqlite3 db.sqlite3 < adduser.sql && \
#
#python setup.py build && \
#python setup.py install && \
#
# Clean up
# NOTE(review): autoconf below was never installed by this Dockerfile.
apk del --purge autoconf \
build-base \
coreutils \
libffi-dev \
python-dev && \
rm -rf /root/* && \
rm -rf /var/cache/apk/*
#
# Set workdir and start the hpfeeds broker
# NOTE(review): runs as root with a shell-form CMD (no direct signal delivery);
# the compose file compensates by stopping the container with SIGKILL. Test-only.
WORKDIR /opt/hpfeeds/broker
CMD python broker.py

View file

@ -0,0 +1 @@
-- Seed test-only hpfeeds credentials: ident/secret plus the channels this key
-- may publish to (pubchans) and subscribe to (subchans), stored as JSON arrays.
-- Fix: the original was missing the opening quote before mailoney.shellcode in
-- both arrays, making the stored channel lists invalid JSON.
insert into authkeys (owner, ident, secret, pubchans, subchans) values ('testID', 'testID', 'testSecret', '["ciscoasa", "rdpy-channel", "mailoney.mail","mailoney.commands","mailoney.shellcode"]', '["ciscoasa", "rdpy-channel", "mailoney.mail","mailoney.commands","mailoney.shellcode"]');

View file

@ -0,0 +1,19 @@
### This is only for testing purposes, do NOT use for production
version: '2.3'
networks:
hpfeeds_local:
services:
# hpfeeds service
hpfeeds:
build: .
container_name: hpfeeds
restart: always
# Stop with SIGKILL — presumably the broker ignores SIGTERM (shell-form CMD
# in the Dockerfile); confirm before changing.
stop_signal: SIGKILL
networks:
- hpfeeds_local
ports:
# hpfeeds broker port.
- "20000:20000"
image: "ghcr.io/telekom-security/hpfeeds:latest"

View file

@ -1,33 +1,38 @@
FROM golang:1.23-alpine AS builder
#
ENV GO111MODULE=on \
CGO_ENABLED=0 \
GOOS=linux
FROM golang:1.21-alpine as builder
#
# Include dist
COPY dist/ /root/dist/
#
# Install packages
RUN apk --no-cache -U add \
# Setup apk
RUN apk -U add --no-cache \
build-base \
git \
g++
g++ && \
#
# Setup go, build dicompot
RUN git clone https://github.com/nsmfoo/dicompot.git && \
mkdir -p /opt/go && \
export GOPATH=/opt/go/ && \
cd /opt/go/ && \
git clone https://github.com/nsmfoo/dicompot.git && \
cd dicompot && \
git checkout 41331194156bbb17078bcc1594f4952ac06a731e && \
cp /root/dist/go.mod .
WORKDIR /go/dicompot
RUN go mod tidy
RUN go mod download
RUN go build -o dicompot github.com/nsmfoo/dicompot/server
go mod download && \
go install -a -x github.com/nsmfoo/dicompot/server
#
FROM scratch
FROM alpine:3.19
#
COPY --from=builder /go/dicompot/dicompot /opt/dicompot/dicompot
# Setup dicompot
#
COPY --from=builder /opt/go/bin/server /opt/dicompot/server
COPY --from=builder /root/dist/dcm_pts/images /opt/dicompot/images
#
# Setup user, groups and configs
#
RUN addgroup -g 2000 dicompot && \
adduser -S -s /bin/ash -u 2000 -D -g 2000 dicompot && \
chown -R dicompot:dicompot /opt/dicompot
#
# Start dicompot
WORKDIR /opt/dicompot
USER 2000:2000
CMD ["-ip","0.0.0.0","-dir","images","-log","/var/log/dicompot/dicompot.log"]
ENTRYPOINT ["./dicompot"]
USER dicompot:dicompot
CMD ["./server","-ip","0.0.0.0","-dir","images","-log","/var/log/dicompot/dicompot.log"]

View file

@ -1,25 +0,0 @@
module github.com/nsmfoo/dicompot
go 1.23
require (
github.com/grailbio/go-dicom v0.0.0-20190117035129-c30d9eaca591
github.com/mattn/go-colorable v0.1.6
github.com/sirupsen/logrus v1.6.0
github.com/snowzach/rotatefilehook v0.0.0-20180327172521-2f64f265f58c
)
require (
github.com/BurntSushi/toml v0.3.1 // indirect
github.com/gobwas/glob v0.0.0-20170212200151-51eb1ee00b6d // indirect
github.com/konsorten/go-windows-terminal-sequences v1.0.3 // indirect
github.com/mattn/go-isatty v0.0.12 // indirect
golang.org/x/sys v0.1.0 // indirect
golang.org/x/text v0.3.8 // indirect
gopkg.in/natefinch/lumberjack.v2 v2.0.0 // indirect
gopkg.in/yaml.v2 v2.3.0 // indirect
)
replace github.com/nsmfoo/dicompot => ../dicompot
replace github.com/golang/lint => ../../golang/lint

View file

@ -1,3 +1,5 @@
version: '2.3'
networks:
dicompot_local:
@ -16,7 +18,6 @@ services:
networks:
- dicompot_local
ports:
- "104:11112"
- "11112:11112"
image: "dtagdevsec/dicompot:24.04"
read_only: true

View file

@ -1,4 +1,4 @@
FROM ubuntu:24.04
FROM ubuntu:22.04
ENV DEBIAN_FRONTEND noninteractive
#
# Include dist
@ -10,13 +10,11 @@ RUN ARCH=$(arch) && \
if [ "$ARCH" = "aarch64" ]; then ARCH="arm64"; fi && \
echo "$ARCH" && \
cd /root/dist/ && \
# Setup apt
apt-get update -y && \
apt-get upgrade -y && \
apt-get install wget -y && \
wget http://ftp.us.debian.org/debian/pool/main/libe/libemu/libemu2_0.2.0+git20120122-1.2+b1_$ARCH.deb \
http://ftp.us.debian.org/debian/pool/main/libe/libemu/libemu-dev_0.2.0+git20120122-1.2+b1_$ARCH.deb && \
apt-get install ./libemu2_0.2.0+git20120122-1.2+b1_$ARCH.deb \
apt install ./libemu2_0.2.0+git20120122-1.2+b1_$ARCH.deb \
./libemu-dev_0.2.0+git20120122-1.2+b1_$ARCH.deb -y && \
apt-get install -y --no-install-recommends \
build-essential \
@ -41,13 +39,14 @@ RUN ARCH=$(arch) && \
python3-dev \
python3-boto3 \
python3-bson \
python3-setuptools \
python3-yaml \
fonts-liberation && \
#
# Get and install dionaea
git clone https://github.com/t3chn0m4g3/dionaea -b 0.11.1 /root/dionaea/ && \
# git clone --depth=1 https://github.com/dinotools/dionaea -b 0.11.0 /root/dionaea/ && \
git clone --depth=1 https://github.com/dinotools/dionaea /root/dionaea/ && \
cd /root/dionaea && \
git checkout 4e459f1b672a5b4c1e8335c0bff1b93738019215 && \
mkdir build && \
cd build && \
cmake -DCMAKE_INSTALL_PREFIX:PATH=/opt/dionaea .. && \
@ -57,7 +56,7 @@ RUN ARCH=$(arch) && \
# Setup user and groups
addgroup --gid 2000 dionaea && \
adduser --system --no-create-home --shell /bin/bash --uid 2000 --disabled-password --disabled-login --gid 2000 dionaea && \
setcap cap_net_bind_service=+ep /opt/dionaea/sbin/dionaea && \
setcap cap_net_bind_service=+ep /opt/dionaea/bin/dionaea && \
#
# Supply configs and set permissions
chown -R dionaea:dionaea /opt/dionaea/var && \
@ -105,17 +104,12 @@ RUN ARCH=$(arch) && \
libnetfilter-queue1 \
libnl-3-200 \
libpcap0.8 \
libpython3.12 \
libpython3.10 \
libudns0 && \
#
apt-get autoremove --purge -y && \
apt-get clean && \
rm -rf /root/* \
/var/lib/apt/lists/* \
/tmp/* \
/var/tmp/* \
/root/.cache \
/opt/dionaea/.git
rm -rf /root/* /var/lib/apt/lists/* /tmp/* /var/tmp/* /root/.cache /opt/dionaea/.git
#
# Start dionaea
STOPSIGNAL SIGINT
@ -123,4 +117,4 @@ STOPSIGNAL SIGINT
# Dionaea sometimes hangs at 100% CPU usage, if detected container will become unhealthy and restarted by tpotinit
HEALTHCHECK --interval=5m --timeout=30s --retries=3 CMD python3 /cpu_check.py $(pgrep -of dionaea) 99
USER dionaea:dionaea
CMD ["/opt/dionaea/sbin/dionaea", "-u", "dionaea", "-g", "dionaea", "-c", "/opt/dionaea/etc/dionaea/dionaea.cfg"]
CMD ["/opt/dionaea/bin/dionaea", "-u", "dionaea", "-g", "dionaea", "-c", "/opt/dionaea/etc/dionaea/dionaea.cfg"]

View file

@ -1,3 +1,5 @@
version: '2.3'
networks:
dionaea_local:

200
docker/docker-compose.yml Normal file
View file

@ -0,0 +1,200 @@
# T-Pot Image Builder (use only for building docker images)
# Each service maps a build context (a subdirectory containing a Dockerfile)
# to the tag it is published under. No ports/volumes/networks are defined —
# this manifest is meant for `docker compose build`, never for running.
version: '2.3'
services:
##################
#### Honeypots
##################
# Adbhoney service
adbhoney:
build: adbhoney/.
image: "dtagdevsec/adbhoney:24.04"
# Ciscoasa service
ciscoasa:
build: ciscoasa/.
image: "dtagdevsec/ciscoasa:24.04"
# CitrixHoneypot service
citrixhoneypot:
build: citrixhoneypot/.
image: "dtagdevsec/citrixhoneypot:24.04"
# Conpot IEC104 service
conpot_IEC104:
build: conpot/.
image: "dtagdevsec/conpot:24.04"
# Cowrie service
cowrie:
build: cowrie/.
image: "dtagdevsec/cowrie:24.04"
# Ddospot service
ddospot:
build: ddospot/.
image: "dtagdevsec/ddospot:24.04"
# Dicompot service
dicompot:
build: dicompot/.
image: "dtagdevsec/dicompot:24.04"
# Dionaea service
dionaea:
build: dionaea/.
image: "dtagdevsec/dionaea:24.04"
# ElasticPot service
elasticpot:
build: elasticpot/.
image: "dtagdevsec/elasticpot:24.04"
# Endlessh service
endlessh:
build: endlessh/.
image: "dtagdevsec/endlessh:24.04"
# Glutton service
glutton:
build: glutton/.
image: "dtagdevsec/glutton:24.04"
# Hellpot service
hellpot:
build: hellpot/.
image: "dtagdevsec/hellpot:24.04"
# Heralding service
heralding:
build: heralding/.
image: "dtagdevsec/heralding:24.04"
# Honeypots service
honeypots:
build: honeypots/.
image: "dtagdevsec/honeypots:24.04"
# Honeytrap service
honeytrap:
build: honeytrap/.
image: "dtagdevsec/honeytrap:24.04"
# IPPHoney service
ipphoney:
build: ipphoney/.
image: "dtagdevsec/ipphoney:24.04"
# Log4Pot service
log4pot:
build: log4pot/.
image: "dtagdevsec/log4pot:24.04"
# Mailoney service
mailoney:
build: mailoney/.
image: "dtagdevsec/mailoney:24.04"
# Medpot service
medpot:
build: medpot/.
image: "dtagdevsec/medpot:24.04"
# Redishoneypot service
redishoneypot:
build: redishoneypot/.
image: "dtagdevsec/redishoneypot:24.04"
# Sentrypeer service
sentrypeer:
build: sentrypeer/.
image: "dtagdevsec/sentrypeer:24.04"
#### Snare / Tanner
## Tanner Redis Service
tanner_redis:
build: tanner/redis/.
image: "dtagdevsec/redis:24.04"
## PHP Sandbox service
tanner_phpox:
build: tanner/phpox/.
image: "dtagdevsec/phpox:24.04"
## Tanner API Service
tanner_api:
build: tanner/tanner/.
image: "dtagdevsec/tanner:24.04"
## Snare Service
snare:
build: tanner/snare/.
image: "dtagdevsec/snare:24.04"
## Wordpot Service
wordpot:
build: wordpot/.
image: "dtagdevsec/wordpot:24.04"
##################
#### NSM
##################
# Fatt service
fatt:
build: fatt/.
image: "dtagdevsec/fatt:24.04"
# P0f service
p0f:
build: p0f/.
image: "dtagdevsec/p0f:24.04"
# Suricata service
suricata:
build: suricata/.
image: "dtagdevsec/suricata:24.04"
##################
#### Tools
##################
#### ELK
## Elasticsearch service
elasticsearch:
build: elk/elasticsearch/.
image: "dtagdevsec/elasticsearch:24.04"
## Kibana service
kibana:
build: elk/kibana/.
image: "dtagdevsec/kibana:24.04"
## Logstash service
logstash:
build: elk/logstash/.
image: "dtagdevsec/logstash:24.04"
# Ewsposter service
ewsposter:
build: ewsposter/.
image: "dtagdevsec/ewsposter:24.04"
# Nginx service
nginx:
build: nginx/.
image: "dtagdevsec/nginx:24.04"
# Spiderfoot service
spiderfoot:
build: spiderfoot/.
image: "dtagdevsec/spiderfoot:24.04"
# Map Web Service
map_web:
build: elk/map/.
image: "dtagdevsec/map:24.04"

View file

@ -1,11 +1,10 @@
FROM alpine:3.20 AS builder
FROM alpine:3.19
#
# Include dist
COPY dist/ /root/dist/
#
# Install packages
RUN apk --no-cache -U upgrade && \
apk --no-cache -U add \
RUN apk -U --no-cache add \
build-base \
ca-certificates \
git \
@ -32,24 +31,28 @@ RUN apk --no-cache -U upgrade && \
mkdir -p /opt && \
cd /opt/ && \
git clone https://gitlab.com/bontchev/elasticpot.git/ && \
cd elasticpot && \
git checkout d12649730d819bd78ea622361b6c65120173ad45 && \
cp /root/dist/requirements.txt .
WORKDIR /opt/elasticpot
RUN pip3 install --break-system-packages pyinstaller
RUN pip3 install --break-system-packages -r requirements.txt
RUN pyinstaller elasticpot.py \
--hidden-import output_plugins \
--hidden-import output_plugins.jsonlog
cd elasticpot && \
git checkout d12649730d819bd78ea622361b6c65120173ad45 && \
cp /root/dist/requirements.txt . && \
pip3 install --break-system-packages -r requirements.txt && \
#
FROM alpine:3.20
RUN apk --no-cache -U upgrade
COPY --from=builder /opt/elasticpot/dist/ /opt/
COPY --from=builder /opt/elasticpot/responses/ /opt/elasticpot/responses/
COPY --from=builder /root/dist/honeypot.cfg /opt/elasticpot/etc/
# Setup user, groups and configs
addgroup -g 2000 elasticpot && \
adduser -S -H -s /bin/ash -u 2000 -D -g 2000 elasticpot && \
mv /root/dist/honeypot.cfg /opt/elasticpot/etc/ && \
#
# Clean up
apk del --purge build-base \
git \
libffi-dev \
openssl-dev \
postgresql-dev \
python3-dev && \
rm -rf /root/* && \
rm -rf /var/cache/apk/* /opt/elasticpot/.git
#
# Start elasticpot
STOPSIGNAL SIGINT
USER 2000:2000
USER elasticpot:elasticpot
WORKDIR /opt/elasticpot/
CMD ["./elasticpot"]
CMD ["/usr/bin/python3","elasticpot.py"]

View file

@ -1,3 +1,5 @@
version: '2.3'
networks:
elasticpot_local:

View file

@ -1,3 +1,5 @@
version: '2.3'
services:
# ELK services

View file

@ -1,29 +1,29 @@
FROM ubuntu:24.04
ENV DEBIAN_FRONTEND noninteractive
ENV ES_VER=8.17.5
FROM ubuntu:22.04
#
# VARS
ENV ES_VER=8.12.2
#
# Include dist
COPY dist/ /root/dist/
#
# Install packages
RUN apt-get update -y && \
apt-get upgrade -y && \
RUN apt-get update -y && \
apt-get install -y \
adduser \
aria2 \
curl && \
#
# Determine arch, get and install packages
ARCH=$(arch) && \
if [ "$ARCH" = "x86_64" ]; then ES_ARCH="amd64"; export _JAVA_OPTIONS=""; fi && \
if [ "$ARCH" = "aarch64" ]; then ES_ARCH="arm64"; export _JAVA_OPTIONS="-XX:UseSVE=0"; fi && \
if [ "$ARCH" = "x86_64" ]; then ES_ARCH="amd64"; fi && \
if [ "$ARCH" = "aarch64" ]; then ES_ARCH="arm64"; fi && \
echo "$ARCH" && \
cd /root/dist/ && \
mkdir -p /usr/share/elasticsearch/config \
/etc/elasticsearch && \
cp elasticsearch.yml /etc/elasticsearch/ && \
aria2c -s 16 -x 16 https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-$ES_VER-$ES_ARCH.deb && \
dpkg --force-confold -i elasticsearch-$ES_VER-$ES_ARCH.deb && \
dpkg -i elasticsearch-$ES_VER-$ES_ARCH.deb && \
#
# Add and move files
# rm -rf /usr/share/elasticsearch/modules/x-pack-ml && \
mkdir -p /usr/share/elasticsearch/config && \
cp elasticsearch.yml /etc/elasticsearch/ && \
#
# Setup user, groups and configs
groupmod -g 2000 elasticsearch && \
@ -36,18 +36,11 @@ RUN apt-get update -y && \
# Clean up
apt-get purge aria2 -y && \
apt-get autoremove -y --purge && \
apt-get clean && \
rm -rf /var/lib/apt/lists/* \
/tmp/* /var/tmp/* \
/root/.cache \
/root/*
apt-get clean && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* /root/.cache /root/*
#
# Healthcheck
HEALTHCHECK --retries=10 CMD curl -s -XGET 'http://127.0.0.1:9200/_cat/health'
#
# Start ELK
USER elasticsearch:elasticsearch
#CMD ["/usr/share/elasticsearch/bin/elasticsearch"]
CMD ARCH=$(arch) && \
if [ "$ARCH" = "aarch64" ]; then export _JAVA_OPTIONS="-XX:UseSVE=0"; fi && \
exec /usr/share/elasticsearch/bin/elasticsearch
CMD ["/usr/share/elasticsearch/bin/elasticsearch"]

View file

@ -1,3 +1,5 @@
version: '2.3'
services:
# ELK services
@ -22,6 +24,6 @@ services:
mem_limit: 2g
ports:
- "127.0.0.1:64298:9200"
image: "ghcr.io/telekom-security/elasticsearch:24.04.1"
image: "dtagdevsec/elasticsearch:24.04"
volumes:
- $HOME/tpotce/data:/data

View file

@ -1,31 +1,28 @@
FROM node:20.18.2-alpine3.20
ENV KB_VER=8.17.5
FROM ubuntu:22.04
#
# VARS
ENV KB_VER=8.12.2
# Include dist
COPY dist/ /root/dist/
#
# Install packages
RUN apk --no-cache -U upgrade && \
apk --no-cache -U add \
RUN apt-get update -y && \
apt-get install -y \
aria2 \
curl \
gcompat && \
curl && \
#
# Determine arch, get and install packages
ARCH=$(arch) && \
if [ "$ARCH" = "x86_64" ]; then KB_ARCH="x86_64"; fi && \
if [ "$ARCH" = "aarch64" ]; then KB_ARCH="aarch64"; fi && \
if [ "$ARCH" = "x86_64" ]; then KB_ARCH="amd64"; fi && \
if [ "$ARCH" = "aarch64" ]; then KB_ARCH="arm64"; fi && \
echo "$ARCH" && \
cd /root/dist/ && \
aria2c -s 16 -x 16 https://artifacts.elastic.co/downloads/kibana/kibana-$KB_VER-linux-$KB_ARCH.tar.gz && \
mkdir -p /usr/share/kibana && \
tar xvfz kibana-$KB_VER-linux-$KB_ARCH.tar.gz --strip-components=1 -C /usr/share/kibana/ && \
#
# Kibana's bundled node does not work in build pipeline
rm /usr/share/kibana/node/glibc-217/bin/node && \
ln -s /usr/local/bin/node /usr/share/kibana/node/glibc-217/bin/node && \
aria2c -s 16 -x 16 https://artifacts.elastic.co/downloads/kibana/kibana-$KB_VER-$KB_ARCH.deb && \
dpkg -i kibana-$KB_VER-$KB_ARCH.deb && \
#
# Setup user, groups and configs
mkdir -p /usr/share/kibana/config \
/usr/share/kibana/data && \
cp /etc/kibana/kibana.yml /usr/share/kibana/config && \
sed -i 's/#server.basePath: ""/server.basePath: "\/kibana"/' /usr/share/kibana/config/kibana.yml && \
sed -i 's/#server.host: "localhost"/server.host: "0.0.0.0"/' /usr/share/kibana/config/kibana.yml && \
sed -i 's/#elasticsearch.hosts: \["http:\/\/localhost:9200"\]/elasticsearch.hosts: \["http:\/\/elasticsearch:9200"\]/' /usr/share/kibana/config/kibana.yml && \
@ -33,20 +30,23 @@ RUN apk --no-cache -U upgrade && \
echo "xpack.reporting.roles.enabled: false" >> /usr/share/kibana/config/kibana.yml && \
echo "elasticsearch.requestTimeout: 60000" >> /usr/share/kibana/config/kibana.yml && \
echo "elasticsearch.shardTimeout: 60000" >> /usr/share/kibana/config/kibana.yml && \
echo "unifiedSearch.autocomplete.valueSuggestions.timeout: 60000" >> /usr/share/kibana/config/kibana.yml && \
echo "unifiedSearch.autocomplete.valueSuggestions.terminateAfter: 1000000" >> /usr/share/kibana/config/kibana.yml && \
echo "kibana.autocompleteTimeout: 60000" >> /usr/share/kibana/config/kibana.yml && \
echo "kibana.autocompleteTerminateAfter: 1000000" >> /usr/share/kibana/config/kibana.yml && \
rm -rf /usr/share/kibana/optimize/bundles/* && \
/usr/share/kibana/bin/kibana --optimize --allow-root && \
addgroup -g 2000 kibana && \
adduser -S -H -s /bin/ash -u 2000 -D -g 2000 kibana && \
chown -R kibana:kibana /usr/share/kibana/data && \
groupmod -g 2000 kibana && \
usermod -u 2000 kibana && \
chown -R root:kibana /etc/kibana && \
chown -R kibana:kibana /usr/share/kibana/data \
/run/kibana \
/var/log/kibana \
/var/lib/kibana && \
chmod 755 -R /usr/share/kibana/config && \
#
# Clean up
apk del --purge aria2 && \
rm -rf /root/* \
/tmp/* \
/var/cache/apk/*
apt-get purge aria2 -y && \
apt-get autoremove -y --purge && \
apt-get clean && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* /root/.cache /root/*
#
# Healthcheck
HEALTHCHECK --retries=10 CMD curl -s -XGET 'http://127.0.0.1:5601'

View file

@ -1,3 +1,5 @@
version: '2.3'
services:
## Kibana service
@ -10,4 +12,4 @@ services:
# condition: service_healthy
ports:
- "127.0.0.1:64296:5601"
image: "ghcr.io/telekom-security/kibana:24.04.1"
image: "dtagdevsec/kibana:24.04"

View file

@ -1,15 +1,13 @@
FROM ubuntu:24.04
ENV DEBIAN_FRONTEND=noninteractive
ENV LS_VER=8.17.5
FROM ubuntu:22.04
#
# VARS
ENV LS_VER=8.12.2
# Include dist
COPY dist/ /root/dist/
#
# Install packages
# Setup env and apt
RUN apt-get update -y && \
apt-get upgrade -y && \
apt-get install -y \
adduser \
aria2 \
bash \
bzip2 \
@ -17,8 +15,8 @@ RUN apt-get update -y && \
#
# Determine arch, get and install packages
ARCH=$(arch) && \
if [ "$ARCH" = "x86_64" ]; then LS_ARCH="amd64"; export _JAVA_OPTIONS=""; fi && \
if [ "$ARCH" = "aarch64" ]; then LS_ARCH="arm64"; export _JAVA_OPTIONS="-XX:UseSVE=0"; fi && \
if [ "$ARCH" = "x86_64" ]; then LS_ARCH="amd64"; fi && \
if [ "$ARCH" = "aarch64" ]; then LS_ARCH="arm64"; fi && \
echo "$ARCH" && \
mkdir -p /etc/listbot && \
cd /etc/listbot && \
@ -28,6 +26,7 @@ RUN apt-get update -y && \
cd /root/dist/ && \
aria2c -s 16 -x 16 https://artifacts.elastic.co/downloads/logstash/logstash-$LS_VER-$LS_ARCH.deb && \
dpkg -i logstash-$LS_VER-$LS_ARCH.deb && \
# /usr/share/logstash/bin/logstash-plugin install logstash-output-gelf logstash-output-syslog && \
#
# Add and move files
cd /root/dist/ && \
@ -57,15 +56,11 @@ RUN apt-get update -y && \
#
# Clean up
apt-get autoremove -y --purge && \
apt-get clean && \
rm -rf /var/lib/apt/lists/* \
/tmp/* /var/tmp/* \
/root/.cache \
/root/*
apt-get clean && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* /root/.cache /root/*
#
# Healthcheck
HEALTHCHECK --retries=10 CMD curl -s -XGET 'http://127.0.0.1:9600'
#
# Start logstash
USER logstash:logstash
CMD ["entrypoint.sh"]
CMD entrypoint.sh

View file

@ -10,10 +10,7 @@ trap fuCLEANUP EXIT
if [ -f "/data/tpot/etc/compose/elk_environment" ];
then
echo "Found .env, now exporting ..."
set -o allexport
source "/data/tpot/etc/compose/elk_environment"
LS_SSL_VERIFICATION="${LS_SSL_VERIFICATION:-full}"
set +o allexport
set -o allexport && source "/data/tpot/etc/compose/elk_environment" && set +o allexport
fi
# Check internet availability
@ -53,7 +50,6 @@ if [ "$TPOT_TYPE" == "SENSOR" ];
echo
echo "T-Pot type: $TPOT_TYPE"
echo "Hive IP: $TPOT_HIVE_IP"
echo "SSL verification: $LS_SSL_VERIFICATION"
echo
# Ensure correct file permissions for private keyfile or SSH will ask for password
cp /usr/share/logstash/config/pipelines_sensor.yml /usr/share/logstash/config/pipelines.yml
@ -101,8 +97,4 @@ if [ "$TPOT_TYPE" != "SENSOR" ];
fi
echo
ARCH=$(arch)
if [ "$ARCH" = "aarch64" ]; then
export _JAVA_OPTIONS="-XX:UseSVE=0";
fi
exec /usr/share/logstash/bin/logstash --config.reload.automatic

View file

@ -29,13 +29,6 @@ input {
type => "Adbhoney"
}
# Beelzebub
file {
path => ["/data/beelzebub/log/beelzebub.json"]
codec => json
type => "Beelzebub"
}
# Ciscoasa
file {
path => ["/data/ciscoasa/log/ciscoasa.log"]
@ -99,13 +92,6 @@ input {
type => "Endlessh"
}
# Galah
file {
path => ["/data/galah/log/galah.json"]
codec => json
type => "Galah"
}
# Glutton
file {
path => ["/data/glutton/log/glutton.log"]
@ -113,20 +99,6 @@ input {
type => "Glutton"
}
# Go-pot
file {
path => ["/data/go-pot/log/go-pot.json"]
codec => json
type => "Go-pot"
}
# H0neytr4p
file {
path => ["/data/h0neytr4p/log/log.json"]
codec => json
type => "H0neytr4p"
}
# Hellpot
file {
path => ["/data/hellpot/log/hellpot.log"]
@ -140,13 +112,6 @@ input {
type => "Heralding"
}
# Honeyaml
file {
path => ["/data/honeyaml/log/honeyaml.log"]
codec => json
type => "Honeyaml"
}
# Honeypots
file {
path => ["/data/honeypots/log/*.log"]
@ -175,13 +140,6 @@ input {
type => "Log4pot"
}
# Miniprint
file {
path => ["/data/miniprint/log/miniprint.json"]
codec => json
type => "Miniprint"
}
# Mailoney
file {
path => ["/data/mailoney/log/commands.log"]
@ -245,9 +203,9 @@ filter {
mutate {
rename => {
"sourceIp" => "src_ip"
"destinationIp" => "dest_ip"
"sourcePort" => "src_port"
"destinationPort" => "dest_port"
"destinationIp" => "dest_ip"
"sourcePort" => "src_port"
"destinationPort" => "dest_port"
"gquic" => "fatt_gquic"
"http" => "fatt_http"
"rdp" => "fatt_rdp"
@ -295,13 +253,6 @@ filter {
}
}
# Beelzebub
if [type] == "Beelzebub" {
date {
match => [ "timestamp", "ISO8601" ]
}
}
# Ciscoasa
if [type] == "Ciscoasa" {
kv {
@ -495,13 +446,6 @@ filter {
}
}
# Galah
if [type] == "Galah" {
date {
match => [ "timestamp", "ISO8601" ]
}
}
# Glutton
if [type] == "Glutton" {
date {
@ -510,22 +454,6 @@ filter {
}
}
# Go-pot
if [type] == "Go-pot" {
if ! [dest_port] {
drop {}
}
date {
match => [ "timestamp", "ISO8601" ]
}
mutate {
remove_field => ["ts"]
rename => {
"duration" => "session_duration"
}
}
}
# Hellpot
if [type] == "Hellpot" {
date {
@ -549,13 +477,6 @@ filter {
}
}
# H0neytr4p
if [type] == "H0neytr4p" {
date {
match => [ "timestamp", "ISO8601" ]
}
}
# Heralding
if [type] == "Heralding" {
csv {
@ -567,13 +488,6 @@ filter {
}
}
# Honeyaml
if [type] == "Honeyaml" {
date {
match => [ "timestamp", "ISO8601" ]
}
}
# Honeypots
if [type] == "Honeypots" {
date {
@ -653,13 +567,6 @@ filter {
}
}
# Miniprint
if [type] == "Miniprint" {
date {
match => [ "timestamp", "ISO8601" ]
}
}
# Redishoneypot
if [type] == "Redishoneypot" {
date {
@ -816,9 +723,7 @@ output {
codec => "json"
format => "json_batch"
url => "https://${TPOT_HIVE_IP}:64294"
# cacert => "/data/hive.crt"
ssl_verification_mode => "${LS_SSL_VERIFICATION}"
ssl_certificate_authorities => "/data/hive.crt"
cacert => "/data/hive.crt"
headers => {
"Authorization" => "Basic ${TPOT_HIVE_USER}"
}

View file

@ -29,13 +29,6 @@ input {
type => "Adbhoney"
}
# Beelzebub
file {
path => ["/data/beelzebub/log/beelzebub.json"]
codec => json
type => "Beelzebub"
}
# Ciscoasa
file {
path => ["/data/ciscoasa/log/ciscoasa.log"]
@ -99,13 +92,6 @@ input {
type => "Endlessh"
}
# Galah
file {
path => ["/data/galah/log/galah.json"]
codec => json
type => "Galah"
}
# Glutton
file {
path => ["/data/glutton/log/glutton.log"]
@ -113,20 +99,6 @@ input {
type => "Glutton"
}
# Go-pot
file {
path => ["/data/go-pot/log/go-pot.json"]
codec => json
type => "Go-pot"
}
# H0neytr4p
file {
path => ["/data/h0neytr4p/log/log.json"]
codec => json
type => "H0neytr4p"
}
# Hellpot
file {
path => ["/data/hellpot/log/hellpot.log"]
@ -140,13 +112,6 @@ input {
type => "Heralding"
}
# Honeyaml
file {
path => ["/data/honeyaml/log/honeyaml.log"]
codec => json
type => "Honeyaml"
}
# Honeypots
file {
path => ["/data/honeypots/log/*.log"]
@ -189,13 +154,6 @@ input {
type => "Medpot"
}
# Miniprint
file {
path => ["/data/miniprint/log/miniprint.json"]
codec => json
type => "Miniprint"
}
# Redishoneypot
file {
path => ["/data/redishoneypot/log/redishoneypot.log"]
@ -295,13 +253,6 @@ filter {
}
}
# Beelzebub
if [type] == "Beelzebub" {
date {
match => [ "timestamp", "ISO8601" ]
}
}
# Ciscoasa
if [type] == "Ciscoasa" {
kv {
@ -495,29 +446,6 @@ filter {
}
}
# Galah
if [type] == "Galah" {
date {
match => [ "timestamp", "ISO8601" ]
}
}
# Go-pot
if [type] == "Go-pot" {
if ! [dest_port] {
drop {}
}
date {
match => [ "timestamp", "ISO8601" ]
}
mutate {
remove_field => ["ts"]
rename => {
"duration" => "session_duration"
}
}
}
# Glutton
if [type] == "Glutton" {
date {
@ -549,13 +477,6 @@ filter {
}
}
# H0neytr4p
if [type] == "H0neytr4p" {
date {
match => [ "timestamp", "ISO8601" ]
}
}
# Heralding
if [type] == "Heralding" {
csv {
@ -567,13 +488,6 @@ filter {
}
}
# Honeyaml
if [type] == "Honeyaml" {
date {
match => [ "timestamp", "ISO8601" ]
}
}
# Honeypots
if [type] == "Honeypots" {
date {
@ -653,13 +567,6 @@ filter {
}
}
# Miniprint
if [type] == "Miniprint" {
date {
match => [ "timestamp", "ISO8601" ]
}
}
# Redishoneypot
if [type] == "Redishoneypot" {
date {

View file

@ -1,3 +1,5 @@
version: '2.3'
services:
## Logstash service
@ -12,7 +14,7 @@ services:
# condition: service_healthy
ports:
- "127.0.0.1:64305:64305"
image: "ghcr.io/telekom-security/logstash:24.04.1"
image: "dtagdevsec/logstash:24.04"
volumes:
- $HOME/tpotce/data:/data
# - /$HOME/tpotce/docker/elk/logstash/dist/logstash.conf:/etc/logstash/conf.d/logstash.conf

View file

@ -1,8 +1,7 @@
FROM alpine:3.20
FROM alpine:3.19
#
# Install packages
RUN apk --no-cache -U upgrade && \
apk --no-cache -U add \
RUN apk -U --no-cache add \
build-base \
git \
libcap \
@ -14,7 +13,7 @@ RUN apk --no-cache -U upgrade && \
# Install from GitHub and setup
mkdir -p /opt && \
cd /opt/ && \
git clone https://github.com/t3chn0m4g3/t-pot-attack-map -b 2.2.6 && \
git clone https://github.com/t3chn0m4g3/t-pot-attack-map -b 2.2.0 && \
cd t-pot-attack-map && \
pip3 install --break-system-packages --upgrade pip && \
pip3 install --break-system-packages -r requirements.txt && \
@ -29,12 +28,11 @@ RUN apk --no-cache -U upgrade && \
apk del --purge build-base \
git \
python3-dev && \
rm -rf /root/* /var/cache/apk/* \
/opt/t-pot-attack-map/.git
rm -rf /root/* /var/cache/apk/* /opt/t-pot-attack-map/.git
#
# Start T-Pot-Attack-Map
ENV TZ=UTC
STOPSIGNAL SIGINT
USER map:map
WORKDIR /opt/t-pot-attack-map
CMD ["/bin/sh", "-c", "/usr/bin/python3 $MAP_COMMAND"]
CMD /usr/bin/python3 $MAP_COMMAND

View file

@ -1,3 +1,5 @@
version: '2.3'
#networks:
# map_local:

View file

@ -1,7 +1,7 @@
FROM alpine:3.16 AS builder
FROM alpine:3.16 as builder
#
# Include dist
COPY dist/ /root/dist/
ADD dist/ /root/dist/
#
# Install packages
RUN apk -U add --no-cache \
@ -16,11 +16,27 @@ RUN apk -U add --no-cache \
make && \
mv /opt/endlessh/endlessh /root/dist
#
FROM alpine:3.20
RUN apk --no-cache -U upgrade
FROM alpine:3.19
#
COPY --from=builder /root/dist/* /opt/endlessh/
#
# Install packages
RUN apk -U add --no-cache \
libcap && \
#
# Setup user, groups and configs
mkdir -p /var/log/endlessh && \
addgroup -g 2000 endlessh && \
adduser -S -H -s /bin/ash -u 2000 -D -g 2000 endlessh && \
chown -R endlessh:endlessh /opt/endlessh && \
#setcap cap_net_bind_service=+ep /usr/bin/python3.8 && \
#
# Clean up
rm -rf /root/* && \
rm -rf /var/cache/apk/*
#
# Set workdir and start endlessh
STOPSIGNAL SIGINT
USER 2000:2000
USER endlessh:endlessh
WORKDIR /opt/endlessh/
CMD ./endlessh -f endlessh.conf >/var/log/endlessh/endlessh.log

View file

@ -1,3 +1,5 @@
version: '2.3'
networks:
endlessh_local:

View file

@ -1,11 +1,10 @@
FROM alpine:3.21
FROM alpine:3.19
#
# Include dist
COPY dist/ /root/dist/
#
# Install packages
RUN apk --no-cache -U upgrade && \
apk --no-cache -U add \
RUN apk -U --no-cache add \
build-base \
git \
libffi-dev \
@ -22,19 +21,13 @@ RUN apk --no-cache -U upgrade && \
py3-requests \
py3-pip \
py3-setuptools \
py3-wheel \
py3-xmltodict && \
py3-wheel && \
pip3 install --break-system-packages --upgrade pip && \
pip3 install --break-system-packages --no-cache-dir \
configparser \
hpfeeds3 \
influxdb \
influxdb-client \
xmljson && \
pip3 install --break-system-packages --no-cache-dir configparser hpfeeds3 influxdb influxdb-client xmljson && \
#
# Setup ewsposter
git clone https://github.com/telekom-security/ewsposter /opt/ewsposter && \
# git clone https://github.com/telekom-security/ewsposter -b v1.32 /opt/ewsposter && \
git clone https://github.com/telekom-security/ewsposter -b v1.25.0 /opt/ewsposter && \
mkdir -p /opt/ewsposter/spool /opt/ewsposter/log && \
#
# Setup user and groups
@ -53,7 +46,7 @@ RUN apk --no-cache -U upgrade && \
openssl-dev \
python3-dev \
py-setuptools && \
rm -rf /root/* /var/cache/apk/* /opt/ewsposter/.git
rm -rf /root/* /var/cache/apk/* /opt/ewsposter/.git
#
# Run ewsposter
STOPSIGNAL SIGINT

View file

@ -44,104 +44,23 @@ token = <your token for influx 2.0>
bucket = <your bucket/database for 2.0/1.8>
org = <your org for influx 2.0>
[ADBHONEY]
adbhoney = true
nodeid = adbhoney-community-01
logfile = /data/adbhoney/log/adbhoney.json
malwaredir = /data/adbhoney/downloads
[BEELZEBUB]
beelzebub = true
nodeid = beelzebub-community-01
logfile = /data/beelzebub/log/beelzebub.json
[CISCOASA]
ciscoasa = true
nodeid = ciscoasa-community-01
logfile = /data/ciscoasa/log/ciscoasa.log
[CITRIX]
citrix = true
nodeid = citrix-community-01
logfile = /data/citrixhoneypot/logs/server.log
[CONPOT]
conpot = true
nodeid = conpot-community-01
logdir = /data/conpot/log
[GLASTOPFV3]
glastopfv3 = false
nodeid = glastopfv3-community-01
sqlitedb = /data/glastopf/db/glastopf.db
malwaredir = /data/glastopf/data/files/
[COWRIE]
cowrie = true
nodeid = cowrie-community-01
logfile = /data/cowrie/log/cowrie.json
[DDOSPOT]
ddospot = true
nodeid = ddospot-community-01
logdir = /data/ddospot/log
[DICOMPOT]
dicompot = true
nodeid = dicompot-community-01
logfile = /data/dicompot/log/dicompot.log
[DIONAEA]
dionaea = true
nodeid = dionaea-community-01
malwaredir = /data/dionaea/binaries/
sqlitedb = /data/dionaea/log/dionaea.sqlite
[ELASTICPOT]
elasticpot = true
nodeid = elasticpot-community-01
logfile = /data/elasticpot/log/elasticpot.json
[ENDLESSH]
endlessh = true
nodeid = endlessh-community-01
logfile = /data/endlessh/log/endlessh.log
[GALAH]
galah = true
nodeid = galah-community-01
logfile = /data/galah/log/galah.json
[GLUTTON]
glutton = true
nodeid = glutton-community-01
logfile = /data/glutton/log/glutton.log
[GOPOT]
gopot = true
nodeid = gopot-community-01
logfile = /data/go-pot/log/go-pot.json
[H0NEYTR4P]
h0neytr4p = true
nodeid = h0neytr4p-community-01
logfile = /data/h0neytr4p/log/log.json
payloaddir = /data/h04neytr4p/payload
[HELLPOT]
hellpot = true
nodeid = hellpot-community-01
logfile = /data/hellpot/log/hellpot.log
[HERALDING]
heralding = true
nodeid = heralding-community-01
logfile = /data/heralding/log/auth.csv
[HONEYAML]
honeyaml = true
nodeid = honeyaml-community-01
logfile = /data/honeyaml/log/honeyaml.log
[HONEYPOTS]
honeypots = true
nodeid = honeypots-community-01
logdir = /data/honeypots/log
[HONEYTRAP]
honeytrap = true
nodeid = honeytrap-community-01
@ -149,47 +68,118 @@ newversion = true
payloaddir = /data/honeytrap/attacks/
attackerfile = /data/honeytrap/log/attacker.log
[IPPHONEY]
ipphoney = true
nodeid = ipphoney-community-01
logfile = /data/ipphoney/log/ipphoney.json
[EMOBILITY]
eMobility = false
nodeid = emobility-community-01
logfile = /data/emobility/log/centralsystemEWS.log
[LOG4POT]
log4pot = true
nodeid = log4pot-community-01
logfile = /data/log4pot/log/log4pot.log
[CONPOT]
conpot = true
nodeid = conpot-community-01
logfile = /data/conpot/log/conpot*.json
[ELASTICPOT]
elasticpot = true
nodeid = elasticpot-community-01
logfile = /data/elasticpot/log/elasticpot.json
[SURICATA]
suricata = false
nodeid = suricata-community-01
logfile = /data/suricata/log/eve.json
[MAILONEY]
mailoney = true
nodeid = mailoney-community-01
logfile = /data/mailoney/log/commands.log
[MEDPOT]
medpot = true
nodeid = medpot-community-01
logfile = /data/medpot/log/medpot.log
[RDPY]
rdpy = false
nodeid = rdpy-community-01
logfile = /data/rdpy/log/rdpy.log
[MINIPRINT]
miniprint = true
nodeid = miniprint-community-01
logfile = /data/miniprint/log/miniprint.json
[VNCLOWPOT]
vnclowpot = false
nodeid = vnclowpot-community-01
logfile = /data/vnclowpot/log/vnclowpot.log
[REDISHONEYPOT]
redishoneypot = true
nodeid = redishoneypot-community-01
logfile = /data/redishoneypot/log/redishoneypot.log
[HERALDING]
heralding = true
nodeid = heralding-community-01
logfile = /data/heralding/log/auth.csv
[SENTRYPEER]
sentrypeer = true
nodeid = sentrypeer-community-01
logfile = /data/sentrypeer/log/sentrypeer.json
[CISCOASA]
ciscoasa = true
nodeid = ciscoasa-community-01
logfile = /data/ciscoasa/log/ciscoasa.log
[TANNER]
tanner = true
nodeid = tanner-community-01
logfile = /data/tanner/log/tanner_report.json
[WORDPOT]
wordpot = true
nodeid = wordpot-community-01
logfile = /data/wordpot/log/wordpot.log
[GLUTTON]
glutton = true
nodeid = glutton-community-01
logfile = /data/glutton/log/glutton.log
[HONEYSAP]
honeysap = false
nodeid = honeysap-community-01
logfile = /data/honeysap/log/honeysap-external.log
[ADBHONEY]
adbhoney = true
nodeid = adbhoney-community-01
logfile = /data/adbhoney/log/adbhoney.json
malwaredir = /data/adbhoney/downloads
[FATT]
fatt = false
nodeid = fatt-community-01
logfile = /data/fatt/log/fatt.log
[IPPHONEY]
ipphoney = true
nodeid = ipphoney-community-01
logfile = /data/ipphoney/log/ipphoney.json
[DICOMPOT]
dicompot = true
nodeid = dicompot-community-01
logfile = /data/dicompot/log/dicompot.log
[MEDPOT]
medpot = true
nodeid = medpot-community-01
logfile = /data/medpot/log/medpot.log
[HONEYPY]
honeypy = false
nodeid = honeypy-community-01
logfile = /data/honeypy/log/json.log
[CITRIX]
citrix = true
nodeid = citrix-community-01
logfile = /data/citrixhoneypot/logs/server.log
[REDISHONEYPOT]
redishoneypot = true
nodeid = redishoneypot-community-01
logfile = /data/redishoneypot/log/redishoneypot.log
[ENDLESSH]
endlessh = true
nodeid = endlessh-community-01
logfile = /data/endlessh/log/endlessh.log
[SENTRYPEER]
sentrypeer = true
nodeid = sentrypeer-community-01
logfile = /data/sentrypeer/log/sentrypeer.json
[LOG4POT]
log4pot = true
nodeid = log4pot-community-01
logfile = /data/log4pot/log/log4pot.log

View file

@ -1,205 +0,0 @@
[MAIN]
homedir = /opt/ewsposter/
spooldir = /opt/ewsposter/spool/
logdir = /opt/ewsposter/log/
del_malware_after_send = false
send_malware = false
sendlimit = 5000
contact = your_email_address
proxy = None
ip_int = None
ip_ext = None
[EWS]
ews = true
username = community-01-user
token = foth{a5maiCee8fineu7
rhost_first = https://community.sicherheitstacho.eu/ews-0.1/alert/postSimpleMessage
rhost_second = https://community.sicherheitstacho.eu/ews-0.1/alert/postSimpleMessage
ignorecert = false
[HPFEED]
hpfeed = %(EWS_HPFEEDS_ENABLE)s
host = %(EWS_HPFEEDS_HOST)s
port = %(EWS_HPFEEDS_PORT)s
channels = %(EWS_HPFEEDS_CHANNELS)s
ident = %(EWS_HPFEEDS_IDENT)s
secret= %(EWS_HPFEEDS_SECRET)s
# path/to/certificate for tls broker - or "false" for non-tls broker
tlscert = %(EWS_HPFEEDS_TLSCERT)s
# hpfeeds submission format: "ews" (xml) or "json"
hpfformat = %(EWS_HPFEEDS_FORMAT)s
[EWSJSON]
json = false
jsondir = /data/ews/json/
[INFLUXDB]
influxdb = false
host = http://localhost
port = 8086
username = <your username for influx 1.8>
password = <your password for influx 1.8>
token = <your token for influx 2.0>
bucket = <your bucket/database for 2.0/1.8>
org = <your org for influx 2.0>
[ADBHONEY]
adbhoney = true
nodeid = adbhoney-community-01
logfile = /data/adbhoney/log/adbhoney.json
malwaredir = /data/adbhoney/downloads
[BEELZEBUB]
beelzebub = true
nodeid = beelzebub-community-01
logfile = /data/beelzebub/log/beelzebub.json
[CISCOASA]
ciscoasa = true
nodeid = ciscoasa-community-01
logfile = /data/ciscoasa/log/ciscoasa.log
[CITRIX]
citrix = true
nodeid = citrix-community-01
logfile = /data/citrixhoneypot/logs/server.log
[CONPOT]
conpot = true
nodeid = conpot-community-01
logdir = /data/conpot/log
[COWRIE]
cowrie = true
nodeid = cowrie-community-01
logfile = /data/cowrie/log/cowrie.json
[DDOSPOT]
ddospot = true
nodeid = ddospot-community-01
logdir = /data/ddospot/log
[DICOMPOT]
dicompot = true
nodeid = dicompot-community-01
logfile = /data/dicompot/log/dicompot.log
[DIONAEA]
dionaea = true
nodeid = dionaea-community-01
malwaredir = /data/dionaea/binaries/
sqlitedb = /data/dionaea/log/dionaea.sqlite
[ELASTICPOT]
elasticpot = true
nodeid = elasticpot-community-01
logfile = /data/elasticpot/log/elasticpot.json
[ENDLESSH]
endlessh = true
nodeid = endlessh-community-01
logfile = /data/endlessh/log/endlessh.log
[FATT]
fatt = false
nodeid = fatt-community-01
logfile = /data/fatt/log/fatt.log
[GALAH]
galah = true
nodeid = galah-community-01
logfile = /data/galah/log/galah.json
[GLUTTON]
glutton = true
nodeid = glutton-community-01
logfile = /data/glutton/log/glutton.log
[GOPOT]
gopot = true
nodeid = gopot-community-01
logfile = /data/go-pot/log/go-pot.json
[H0NEYTR4P]
h0neytr4p = true
nodeid = h0neytr4p-community-01
logfile = /data/h0neytr4p/log/log.json
payloaddir = /data/h04neytr4p/payload
[HELLPOT]
hellpot = true
nodeid = hellpot-community-01
logfile = /data/hellpot/log/hellpot.log
[HERALDING]
heralding = true
nodeid = heralding-community-01
logfile = /data/heralding/log/auth.csv
[HONEYAML]
honeyaml = true
nodeid = honeyaml-community-01
logfile = /data/honeyaml/log/honeyaml.log
[HONEYPOTS]
honeypots = true
nodeid = honeypots-community-01
logdir = /data/honeypots/log
[HONEYTRAP]
honeytrap = true
nodeid = honeytrap-community-01
newversion = true
payloaddir = /data/honeytrap/attacks/
attackerfile = /data/honeytrap/log/attacker.log
[IPPHONEY]
ipphoney = true
nodeid = ipphoney-community-01
logfile = /data/ipphoney/log/ipphoney.json
[LOG4POT]
log4pot = true
nodeid = log4pot-community-01
logfile = /data/log4pot/log/log4pot.log
[MAILONEY]
mailoney = true
nodeid = mailoney-community-01
logfile = /data/mailoney/log/commands.log
[MEDPOT]
medpot = true
nodeid = medpot-community-01
logfile = /data/medpot/log/medpot.log
[MINIPRINT]
miniprint = true
nodeid = miniprint-community-01
logfile = /data/miniprint/log/miniprint.json
[REDISHONEYPOT]
redishoneypot = true
nodeid = redishoneypot-community-01
logfile = /data/redishoneypot/log/redishoneypot.log
[SENTRYPEER]
sentrypeer = true
nodeid = sentrypeer-community-01
logfile = /data/sentrypeer/log/sentrypeer.json
[SURICATA]
suricata = false
nodeid = suricata-community-01
logfile = /data/suricata/log/eve.json
[TANNER]
tanner = true
nodeid = tanner-community-01
logfile = /data/tanner/log/tanner_report.json
[WORDPOT]
wordpot = true
nodeid = wordpot-community-01
logfile = /data/wordpot/log/wordpot.log

View file

@ -1,3 +1,5 @@
version: '2.3'
networks:
ewsposter_local:
@ -21,7 +23,7 @@ services:
- EWS_HPFEEDS_SECRET=secret
- EWS_HPFEEDS_TLSCERT=false
- EWS_HPFEEDS_FORMAT=json
image: "ghcr.io/telekom-security/ewsposter:24.04.1"
image: "dtagdevsec/ewsposter:24.04"
volumes:
- $HOME/tpotce/data:/data
- $HOME/tpotce/data/ews/conf/ews.ip:/opt/ewsposter/ews.ip

View file

@ -1,8 +1,7 @@
FROM alpine:3.20
FROM alpine:3.19
#
# Install packages
RUN apk --no-cache -U upgrade && \
apk --no-cache -U add \
# Get and install dependencies & packages
RUN apk -U --no-cache add \
git \
libcap \
py3-libxml2 \
@ -22,8 +21,11 @@ RUN apk --no-cache -U upgrade && \
git clone https://github.com/0x4D31/fatt && \
cd fatt && \
git checkout c29e553514281e50781f86932b82337a5ada5640 && \
#git checkout 45cabf0b8b59162b99a1732d853efb01614563fe && \
#git checkout 314cd1ff7873b5a145a51ec4e85f6107828a2c79 && \
mkdir -p log && \
pip3 install --no-cache-dir --break-system-packages pyshark && \
# pyshark >= 0.4.3 breaks fatt
pip3 install --break-system-packages pyshark==0.4.2.11 && \
#
# Setup configs
chgrp fatt /usr/bin/dumpcap && \
@ -32,14 +34,12 @@ RUN apk --no-cache -U upgrade && \
#
# Clean up
apk del --purge git \
python3-dev && \
rm -rf /root/* \
/var/cache/apk/* \
/opt/fatt/.git
python3-dev && \
rm -rf /root/* /var/cache/apk/* /opt/fatt/.git
#
# Start fatt
STOPSIGNAL SIGINT
ENV PYTHONPATH /opt/fatt
WORKDIR /opt/fatt
USER fatt:fatt
CMD python3 fatt.py -i $(ip route | grep "^default" | awk '{ print $5 }') --print_output --json_logging -o log/fatt.log
CMD python3 fatt.py -i $(/sbin/ip address show | /usr/bin/awk '/inet.*brd/{ print $NF; exit }') --print_output --json_logging -o log/fatt.log

View file

@ -1,11 +1,11 @@
version: '2.3'
services:
# Fatt service
fatt:
build: .
container_name: fatt
stdin_open: true
tty: true
restart: always
# cpu_count: 1
# cpus: 0.75
@ -14,6 +14,6 @@ services:
- NET_ADMIN
- SYS_NICE
- NET_RAW
image: "ghcr.io/telekom-security/fatt:24.04.1"
image: "dtagdevsec/fatt:24.04"
volumes:
- $HOME/tpotce/data/fatt/log:/opt/fatt/log

View file

@ -1,26 +0,0 @@
FROM golang:1.23-alpine AS builder
RUN <<EOF
apk --no-cache -U upgrade
apk --no-cache -U add build-base git
mkdir -p /opt
cd /opt
git clone https://github.com/t3chn0m4g3/galah
cd galah
git checkout d4739ec5abaed83c5367716a77908be548d3d003
EOF
WORKDIR /opt/galah
ENV CGO_ENABLED=1
RUN <<EOF
go mod download
go build -o galah ./cmd/galah
EOF
#
FROM alpine:3.20
RUN apk --no-cache -U upgrade && \
apk --no-cache -U add openssl
COPY --from=builder /opt/galah/ /opt/galah/
#
# Start galah
WORKDIR /opt/galah
USER 2000:2000
CMD ["./entrypoint.sh"]

View file

@ -1,33 +0,0 @@
networks:
galah_local:
services:
# Galah service
galah:
build: .
container_name: galah
restart: always
# cpu_count: 1
# cpus: 0.25
networks:
- galah_local
ports:
- "80:80"
- "443:443"
- "8443:8443"
- "8080:8080"
image: ghcr.io/telekom-security/galah:24.04.1
environment:
LLM_PROVIDER: "ollama"
LLM_SERVER_URL: "http://ollama.local:11434"
LLM_MODEL: "llama3.1"
# LLM_TEMPERATURE: ${GALAH_LLM_TEMPERATURE}
# LLM_API_KEY: ${GALAH_LLM_API_KEY}
# LLM_CLOUD_LOCATION: ${GALAH_LLM_CLOUD_LOCATION}
# LLM_CLOUD_PROJECT: ${GALAH_LLM_CLOUD_PROJECT}
read_only: true
volumes:
- $HOME/tpotce/data/galah/cache:/opt/galah/config/cache
- $HOME/tpotce/data/galah/cert:/opt/galah/config/cert
- $HOME/tpotce/data/galah/log:/opt/galah/log

View file

@ -1,43 +1,54 @@
FROM golang:1.23-alpine AS builder
FROM golang:1.21-alpine as builder
#
# Include dist
COPY dist/ /root/dist/
#
# Setup apk
RUN apk --no-cache -U upgrade && \
apk -U --no-cache add \
make \
git \
g++ \
RUN apk -U --no-cache add \
build-base \
git \
g++ \
iptables-dev \
libpcap-dev && \
#
# Setup go, glutton
export GO111MODULE=on && \
mkdir -p /opt/ && \
cd /opt/ && \
git clone https://github.com/mushorg/glutton && \
cd /opt/glutton/ && \
git checkout b3b5944b79893ccb1da19e112571674841bbe124 && \
git checkout c1204c65ce32bfdc0e08fb2a9abe89b3b8eeed62 && \
cp /root/dist/system.go . && \
go mod download && \
make build && \
cp /root/dist/*.yaml /opt/glutton/config/
mv /root/dist/config.yaml /opt/glutton/config/
#
FROM alpine:3.20
FROM alpine:3.19
#
COPY --from=builder /opt/glutton/bin /opt/glutton/bin
COPY --from=builder /opt/glutton/config /opt/glutton/config
COPY --from=builder /opt/glutton/rules /opt/glutton/rules
#
RUN apk -U --no-cache upgrade && \
apk -U --no-cache add \
RUN apk -U --no-cache add \
iptables \
iptables-dev \
libnetfilter_queue-dev \
libcap \
libpcap-dev && \
libpcap-dev && \
setcap cap_net_admin,cap_net_raw=+ep /opt/glutton/bin/server && \
setcap cap_net_admin,cap_net_raw=+ep /sbin/xtables-nft-multi && \
mkdir -p /var/log/glutton \
/opt/glutton/payloads
/opt/glutton/payloads && \
#
# Setup user, groups and configs
addgroup -g 2000 glutton && \
adduser -S -s /bin/ash -u 2000 -D -g 2000 glutton && \
#
# Clean up
rm -rf /var/cache/apk/* \
/root/*
#
# Start glutton
WORKDIR /opt/glutton
USER 2000:2000
CMD exec bin/server -d true -i $(ip route | grep "^default" | awk '{ print $5 }') -l /var/log/glutton/glutton.log > /dev/null 2>&1
USER glutton:glutton
CMD exec bin/server -d true -i $(/sbin/ip address show | /usr/bin/awk '/inet.*brd/{ print $NF; exit }') -l /var/log/glutton/glutton.log > /dev/null 2>&1

View file

@ -1,7 +1,6 @@
ports:
tcp: 5000
udp: 5001
ssh: 2222
# udp: 5001
rules_path: config/rules.yaml

View file

@ -1,36 +0,0 @@
rules:
- match: tcp dst port 23 or port 2323 or port 23231
type: conn_handler
target: telnet
- match: tcp dst port 1883
type: conn_handler
target: mqtt
- match: tcp dst port 6969
type: conn_handler
target: bittorrent
- match: tcp dst port 25
type: conn_handler
target: smtp
- match: tcp dst port 3389
type: conn_handler
target: rdp
- match: tcp dst port 445
type: conn_handler
target: smb
- match: tcp dst port 21
type: conn_handler
target: ftp
- match: tcp dst port 5060
type: conn_handler
target: sip
- match: tcp dst port 5222 or port 5223
type: conn_handler
target: jabber
- match: tcp dst port 11211
type: conn_handler
target: memcache
- match: tcp
type: conn_handler
target: tcp
- match: udp
type: drop

View file

@ -27,12 +27,12 @@ func (g *Glutton) startMonitor(quit chan struct{}) {
for {
select {
// case <-ticker.C:
// openFiles, err := countOpenFiles()
// if err != nil {
// fmt.Printf("Failed :%s", err)
// }
// runningRoutines := runtime.NumGoroutine()
// g.Logger.Info(fmt.Sprintf("running Go routines: %d, open files: %d", openFiles, runningRoutines))
// openFiles, err := countOpenFiles()
// if err != nil {
// fmt.Printf("Failed :%s", err)
// }
// runningRoutines := runtime.NumGoroutine()
// g.Logger.Info(fmt.Sprintf("running Go routines: %d, open files: %d", openFiles, runningRoutines))
case <-quit:
g.Logger.Info("monitoring stopped...")
ticker.Stop()

View file

@ -1,3 +1,5 @@
version: '2.3'
services:
# glutton service

View file

@ -1,22 +0,0 @@
FROM golang:1.23-alpine AS builder
RUN <<EOF
apk -U upgrade
apk -U add git
mkdir -p /opt
cd /opt
git clone https://github.com/ryanolee/go-pot -b v1.0.0
EOF
WORKDIR /opt/go-pot
#
RUN CGO_ENABLED=0 GOOS=linux go build -o /opt/go-pot/go-pot
#
FROM scratch
#
COPY --from=builder /opt/go-pot/go-pot /opt/go-pot/go-pot
COPY dist/config.yml /opt/go-pot/config.yml
#
STOPSIGNAL SIGINT
USER 2000:2000
WORKDIR /opt/go-pot
CMD ["start", "--host", "0.0.0.0", "--config-file", "config.yml"]
ENTRYPOINT ["./go-pot"]

Some files were not shown because too many files have changed in this diff Show more