Compare commits

No commits in common. "master" and "16.10.1" have entirely different histories.

.env — 174 changed lines
@@ -1,174 +0,0 @@

# T-Pot config file. Do not remove.

###############################################
# T-Pot Base Settings - Adjust to your needs. #
###############################################

# Set Web usernames and passwords here. This section will be used to create / update the Nginx password file nginxpasswd.
# <empty>: This is the default
# <base64 encoded htpasswd usernames / passwords>:
# Use 'htpasswd -n -b "username" "password" | base64 -w0' to create the WEB_USER if you want to manually deploy T-Pot, run 'install.sh' to automatically add a user during installation, or 'genuser.sh' if you just want to add a web user (see the sketch below).
# Example: 'htpasswd -n -b "tsec" "tsec" | base64 -w0' will print dHNlYzokYXByMSRYUnE2SC5rbiRVRjZQM1VVQmJVNWJUQmNmSGRuUFQxCgo=
# Copy the string and replace WEB_USER=dHNlYzokYXByMSRYUnE2SC5rbiRVRjZQM1VVQmJVNWJUQmNmSGRuUFQxCgo=
# Multiple users are possible:
# WEB_USER=dHNlYzokYXByMSRYUnE2SC5rbiRVRjZQM1VVQmJVNWJUQmNmSGRuUFQxCgo= dHNlYzokYXByMSR6VUFHVWdmOCRROXI3a09CTjFjY3lCeU1DTloyanEvCgo=
WEB_USER=
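Not part of the `.env` itself — a minimal sketch of the commands referenced above for creating a web user, assuming `htpasswd` (apache2-utils) is installed; replace the example `tsec`/`tsec` credentials with your own.

```bash
# Create a base64-encoded htpasswd entry (example credentials from above; use your own)
WEB_USER_B64=$(htpasswd -n -b "tsec" "tsec" | base64 -w0)

# The resulting string goes after WEB_USER= in this file; multiple entries are space-separated
echo "WEB_USER=${WEB_USER_B64}"
```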
# Set Logstash Web usernames and passwords here. This section will be used to create / update the Nginx password file lswebpasswd.
# The Logstash Web usernames are used for T-Pot log ingestion via Logstash, each sensor should have its own user.
# <empty>: This is empty by default.
# <'htpasswd encoded usernames / passwords'>:
# Use 'htpasswd -n -b "username" "password" | base64 -w0' to create the LS_WEB_USER if you want to manually deploy the sensor.
# Example: 'htpasswd -n -b "sensor" "sensor" | base64 -w0' will print c2Vuc29yOiRhcHIxJGVpMHdzUmdYJHNyWHF4UG53ZzZqWUc3aEFaUWxrWDEKCg==
# Copy the string and replace / add LS_WEB_USER=c2Vuc29yOiRhcHIxJGVpMHdzUmdYJHNyWHF4UG53ZzZqWUc3aEFaUWxrWDEKCg==
# Multiple users are possible:
# LS_WEB_USER=c2Vuc29yMTokYXByMSQ5aXhNRk5yMCR6d3F2dGFwQ2x0cFBhU1pqMm9ZemYxCgo= c2Vuc29yMjokYXByMSRtYTlOS1J2NCQvU3dsVVBMeW5RaVIyM3pyWVAzOUkwCgo=
LS_WEB_USER=

# T-Pot Blackhole
# ENABLED: T-Pot will download a db of known mass scanners and nullroute them.
#          Be aware, this will put T-Pot off the map for stealth reasons and
#          you will get less traffic. Routes will be active until next reboot
#          and will be re-added with every T-Pot start until disabled.
# DISABLED: This is the default and no stealth efforts are in place.
TPOT_BLACKHOLE=DISABLED

# T-Pot Persistence
# on: This is the default. T-Pot will keep the honeypot logfiles and rotate
#     with logrotate for 30 days.
# off: This is recommended for Raspberry Pi or setups with weaker CPUs or
#      if you just do not need any of the logfiles.
TPOT_PERSISTENCE=on

# T-Pot Persistence Cycles
# <1-999>: Set the number of T-Pot restart cycles for logrotate.
# Be mindful of this setting as the logs will use up a lot of available disk space.
# In case the setting is invalid, T-Pot will default to 30 cycles.
# Remember to adjust the Elasticsearch Lifecycle Policy (https://github.com/telekom-security/tpotce/?tab=readme-ov-file#log-persistence)
# as this setting only accounts for the honeypot logs in the ~/tpotce/data folder.
TPOT_PERSISTENCE_CYCLES=30

# T-Pot Type
# HIVE: This is the default and offers everything to connect T-Pot sensors.
# SENSOR: This needs to be used when running a sensor. Be sure to adjust all other
#         settings as well (the steps below are condensed into a sketch after this block).
#         1. You will need to copy compose/sensor.yml to ./docker-compose.yml
#         2. From the HIVE host you will need to copy ~/tpotce/data/nginx/cert/nginx.crt to
#            your SENSOR host to ~/tpotce/data/hive.crt
#         3. On HIVE: Create a web user per SENSOR on the HIVE and provide the credentials below.
#            Create credentials with 'htpasswd ~/tpotce/data/nginx/conf/lswebpasswd <username>'
#         4. On SENSOR: Provide the username / password from (3) for TPOT_HIVE_USER as a base64 encoded string:
#            "echo -n 'username:password' | base64 -w0"
# MOBILE: This will set the correct type for T-Pot Mobile (https://github.com/telekom-security/tpotmobile)
TPOT_TYPE=HIVE
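The four SENSOR steps above, condensed into a hedged command sketch; `<sensor>`, `<username>` and `<password>` are placeholders, the paths are the defaults used throughout this file, and `scp` is just one way to move the certificate.

```bash
# 1. On the SENSOR: use the sensor compose file
cp compose/sensor.yml ./docker-compose.yml

# 2. On the HIVE: copy the HIVE certificate over to the SENSOR
scp ~/tpotce/data/nginx/cert/nginx.crt <sensor>:~/tpotce/data/hive.crt

# 3. On the HIVE: create a Logstash web user for this SENSOR
htpasswd ~/tpotce/data/nginx/conf/lswebpasswd <username>

# 4. On the SENSOR: base64-encode the credentials from (3) and set TPOT_HIVE_USER to the output
echo -n '<username>:<password>' | base64 -w0
```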

# T-Pot Hive User (only relevant for SENSOR deployment)
# <empty>: This is empty by default.
# <base64 encoded string>: Provide a base64 encoded string "echo -n 'username:password' | base64 -w0"
# i.e. TPOT_HIVE_USER='dXNlcm5hbWU6cGFzc3dvcmQ='
TPOT_HIVE_USER=

# Logstash Sensor SSL verification (only relevant on SENSOR hosts)
# full: This is the default. Logstash, by default, verifies the complete certificate chain for ssl certificates.
#       This also includes the FQDN and sANs. By default T-Pot will only generate a self-signed certificate which
#       contains a sAN for the HIVE IP. In scenarios where the HIVE needs to be accessed via the Internet, maybe with
#       a different NAT address, a new certificate needs to be generated before deployment that includes all the
#       IPs and FQDNs as sANs, so that Logstash can successfully establish a connection to the HIVE for transmitting
#       logs. Details here: https://github.com/telekom-security/tpotce?tab=readme-ov-file#distributed-deployment
# none: This setting will disable the ssl verification check of Logstash and should only be used in a testing
#       environment where IPs often change. It is not recommended for a production environment where trust between
#       HIVE and SENSOR is only established through a self-signed certificate.
LS_SSL_VERIFICATION=full
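An optional check, not in the original file: inspect the sANs of the self-signed HIVE certificate before deciding whether `full` verification can succeed, assuming the default certificate path shown in the SENSOR steps above.

```bash
# List the Subject Alternative Names contained in the HIVE certificate
openssl x509 -in ~/tpotce/data/nginx/cert/nginx.crt -noout -text | grep -A1 "Subject Alternative Name"
```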

# T-Pot Hive IP (only relevant for SENSOR deployment)
# <empty>: This is empty by default.
# <IP, FQDN>: This can be either an IP (e.g. 192.168.1.1) or a FQDN (e.g. foo.bar.local)
TPOT_HIVE_IP=

# T-Pot AttackMap Text Output
# ENABLED: This is the default and the docker container map_data will print events to the console.
# DISABLED: Printing events to the console is disabled.
TPOT_ATTACKMAP_TEXT=ENABLED

# T-Pot AttackMap Text Output Timezone
# UTC: (T-Pot default) This is usually the best option.
# Continent/City: In Linux you can check your timezone with `readlink /etc/localtime` or
#                 see the full list here: https://en.wikipedia.org/wiki/List_of_tz_database_time_zones
#                 Examples: America/New_York, Asia/Taipei, Australia/Melbourne, Europe/Athens, Europe/Berlin
TPOT_ATTACKMAP_TEXT_TIMEZONE=UTC

###################################################################################
# Honeypots / Tools settings
###################################################################################
# Some services / tools offer adjustments using ENVs which can be adjusted here.
###################################################################################

# Suricata ET Pro ruleset
# OPEN: This is the default and will use the ET Open ruleset
# OINKCODE: Replace OPEN with your Oinkcode to use the ET Pro ruleset
OINKCODE=OPEN

# Beelzebub Honeypot supports LLMs such as ChatGPT and the Ollama backend.
# Beelzebub is not part of the standard edition, please follow the README regarding setup.
# It is recommended to use the Ollama backend to keep costs at bay.
# Remember to rate limit API usage / set budget alerts when using the ChatGPT API.
# BEELZEBUB_LLM_MODEL: Set to "ollama" or "gpt4-o".
# BEELZEBUB_LLM_HOST: When using "ollama" set it to the URL of your Ollama backend.
# BEELZEBUB_OLLAMA_MODEL: Set to the model you are serving on your Ollama backend, i.e. "openchat".
# BEELZEBUB_LLM_MODEL: "gpt4-o"
# BEELZEBUB_OPENAISECRETKEY: "sk-proj-123456"
BEELZEBUB_LLM_MODEL: "ollama"
BEELZEBUB_LLM_HOST: "http://ollama.local:11434/api/chat"
BEELZEBUB_OLLAMA_MODEL: "openchat"
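A quick sanity check, not part of the original file: verify that the Ollama endpoint configured above answers before Beelzebub relies on it (assumes `ollama.local` resolves from this host and the `openchat` model has already been pulled).

```bash
# One-off request against the Ollama chat API used by BEELZEBUB_LLM_HOST
curl -s http://ollama.local:11434/api/chat \
  -d '{"model": "openchat", "messages": [{"role": "user", "content": "hello"}], "stream": false}'
```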

# Galah is an LLM-powered web honeypot supporting various LLM backends.
# Galah is not part of the standard edition, please follow the README regarding setup.
# It is recommended to use the Ollama backend to keep costs at bay.
# Remember to rate limit API usage / set budget alerts when using the ChatGPT API.
# GALAH_LLM_PROVIDER: Set to "ollama" or "gpt4-o".
# GALAH_LLM_SERVER_URL: When using "ollama" set it to the URL of your Ollama backend.
# GALAH_LLM_MODEL: Set to the model you are serving on your Ollama backend, i.e. "llama3".
# GALAH_LLM_TEMPERATURE: "1"
# GALAH_LLM_API_KEY: "sk-XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
# GALAH_LLM_CLOUD_LOCATION: ""
# GALAH_LLM_CLOUD_PROJECT: ""
GALAH_LLM_PROVIDER: "ollama"
GALAH_LLM_SERVER_URL: "http://ollama.local:11434"
GALAH_LLM_MODEL: "llama3.1"


###################################################################################
# NEVER MAKE CHANGES TO THIS SECTION UNLESS YOU REALLY KNOW WHAT YOU ARE DOING!!! #
###################################################################################

# docker.sock Path
TPOT_DOCKER_SOCK=/var/run/docker.sock

# docker compose .env
TPOT_DOCKER_ENV=./.env

# Docker-Compose file
TPOT_DOCKER_COMPOSE=./docker-compose.yml

# T-Pot Docker Repo
# Depending on where you are located you may choose between DockerHub and GHCR
# dtagdevsec: This will use the DockerHub image registry
# ghcr.io/telekom-security: This will use the GitHub container registry
TPOT_REPO=ghcr.io/telekom-security

# T-Pot Version Tag
TPOT_VERSION=24.04.1

# T-Pot Pull Policy
# always: (T-Pot default) Compose implementations SHOULD always pull the image from the registry.
# never: Compose implementations SHOULD NOT pull the image from a registry and SHOULD rely on the platform cached image.
# missing: Compose implementations SHOULD pull the image only if it's not available in the platform cache.
# build: Compose implementations SHOULD build the image. Compose implementations SHOULD rebuild the image if already present.
TPOT_PULL_POLICY=always

# T-Pot Data Path
TPOT_DATA_PATH=./data

# OSType (linux, mac, win)
# Most docker features are available on linux
TPOT_OSTYPE=linux
.github/ISSUE_TEMPLATE.md — 38 added lines (vendored, new file)
@@ -0,0 +1,38 @@

# Contribution

Thank you for your decision to contribute to T-Pot.

## Issues

Please feel free to post your problems, ideas and issues [here](https://github.com/dtag-dev-sec/tpotce/issues). We will try to answer ASAP, but to speed things up we encourage you to ...
- [ ] Use the [search function](https://github.com/dtag-dev-sec/tpotce/issues?utf8=%E2%9C%93&q=) first
- [ ] Check the [FAQ](#faq)
- [ ] Provide [basic support information](#info) with regard to your issue

Thank you :smiley:

-

<a name="faq"></a>
### FAQ

##### Where can I find the honeypot logs?
###### The honeypot logs are located in `/data/`. You have to login via ssh, run `sudo su -` and then `cd /data/`. Do not change any permissions here or T-Pot will fail to work.

-

<a name="info"></a>
### Basic support information

- What T-Pot version are you currently using?
- Are you running on an Intel NUC or a VM?
- How long has your installation been running?
- Did you install any upgrades or packages?
- Did you modify any scripts?
- Have you turned persistence on/off?
- How much RAM is available (login via ssh and run `htop`)?
- How much stress are the CPUs under (login via ssh and run `htop`)?
- How much swap space is being used (login via ssh and run `htop`)?
- How much free disk space is available (login via ssh and run `sudo df -h`)?
- What is the current container status (login via ssh and run `sudo start.sh`)?
43
.github/ISSUE_TEMPLATE/bug-report-for-t-pot.md
vendored
|
|
@ -1,43 +0,0 @@
|
|||
---
|
||||
name: Bug report for T-Pot 24.04.x
|
||||
about: Bug report for T-Pot 24.04.x
|
||||
title: ''
|
||||
labels: ''
|
||||
assignees: ''
|
||||
|
||||
---
|
||||
|
||||
# Successfully raise an issue
|
||||
Before you post your issue make sure it has not been answered yet and provide **⚠️ BASIC SUPPORT INFORMATION** (as requested below) if you come to the conclusion it is a new issue.
|
||||
|
||||
- 🔍 Use the [search function](https://github.com/telekom-security/tpotce/issues?utf8=%E2%9C%93&q=) first
|
||||
- 🧐 Check our [Config Examples & Tutorials](https://github.com/telekom-security/tpotce/discussions/categories/config-examples-tutorials) and the [discussions](https://github.com/telekom-security/tpotce/discussions) in general.
|
||||
- 📚 Consult the documentation of 💻 your Linux OS, 🐳 [Docker](https://docs.docker.com/), the 🦌 [Elastic stack](https://www.elastic.co/guide/index.html) and the 🍯 [T-Pot Readme](https://github.com/telekom-security/tpotce/blob/master/README.md).
|
||||
- ⚙️ The [Troubleshoot Section](https://github.com/telekom-security/tpotce?tab=readme-ov-file#troubleshooting) of the [T-Pot Readme](https://github.com/telekom-security/tpotce/blob/master/README.md) is a good starting point to collect a good set of information for the issue and / or to fix things on your own.
|
||||
- **⚠️ Provide [BASIC SUPPORT INFORMATION](#-basic-support-information-commands-are-expected-to-run-as-root) or similar detailed information with regard to your issue or we will close the issue or convert it into a discussion without further interaction from the maintainers**.<br>
|
||||
|
||||
# ⚠️ Basic support information (commands are expected to run as `root`)
|
||||
|
||||
**We happily take the time to improve T-Pot and take care of things, but we need you to take the time to create an issue that provides us with all the information we need.**
|
||||
|
||||
- What OS is your T-Pot running on?
|
||||
- What is the version of the OS `lsb_release -a` and `uname -a`?
|
||||
- What T-Pot version are you currently using (only **T-Pot 24.04.x** is currently supported)?
|
||||
- What architecture are you running on (i.e. hardware, cloud, VM, etc.)?
|
||||
- Review the `~/install_tpot.log`, attach the log and highlight the errors.
|
||||
- How long has your installation been running?
|
||||
- If it is a fresh install consult the documentation first.
|
||||
- Most likely it is a port conflict or a remote dependency was unavailable.
|
||||
- Retry a fresh installation and only open the issue if the error keeps coming up and is not resolved using the documentation as described [here](#how-to-raise-an-issue).
|
||||
- Did you install upgrades, packages or use the update script?
|
||||
- Did you modify any scripts or configs? If yes, please attach the changes.
|
||||
- Please provide a screenshot of `htop` and `docker stats`.
|
||||
- How much free disk space is available (`df -h`)?
|
||||
- What is the current container status (`dps`)?
|
||||
- On Linux: What is the status of the T-Pot service (`systemctl status tpot`)?
|
||||
- What ports are being occupied? Stop T-Pot with `systemctl stop tpot` and run `grc netstat -tulpen` (a consolidated sketch follows this list).
|
||||
- Stop T-Pot `systemctl stop tpot`
|
||||
- Run `grc netstat -tulpen`
|
||||
- Run T-Pot manually with `docker compose -f ~/tpotce/docker-compose.yml up` and check for errors
|
||||
- Stop execution with `CTRL-C` and `docker compose -f ~/tpotce/docker-compose.yml down -v`
|
||||
- If a single container shows as `DOWN` you can run `docker logs <container-name>` for the latest log entries
|
||||
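The port-conflict checks from the list above, consolidated into one runnable sequence (a sketch; `<container-name>` is a placeholder and the compose path matches the default referenced in this template).

```bash
systemctl stop tpot                                   # stop the T-Pot service
grc netstat -tulpen                                   # check which ports are still occupied
docker compose -f ~/tpotce/docker-compose.yml up      # run T-Pot manually and watch for errors
# stop with CTRL-C, then clean up:
docker compose -f ~/tpotce/docker-compose.yml down -v
docker logs <container-name>                          # latest log entries of a container shown as DOWN
```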
|
|
@ -1,20 +0,0 @@
|
|||
---
|
||||
name: Feature request for T-Pot 24.04.x
|
||||
about: Suggest an idea for T-Pot 24.04.x
|
||||
title: ''
|
||||
labels: ''
|
||||
assignees: ''
|
||||
|
||||
---
|
||||
|
||||
**Is your feature request related to a problem? Please describe.**
|
||||
A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
|
||||
|
||||
**Describe the solution you'd like**
|
||||
A clear and concise description of what you want to happen.
|
||||
|
||||
**Describe alternatives you've considered**
|
||||
A clear and concise description of any alternative solutions or features you've considered.
|
||||
|
||||
**Additional context**
|
||||
Add any other context or screenshots about the feature request here.
|
||||
|
|
@ -1,43 +0,0 @@
|
|||
---
|
||||
name: General issue for T-Pot 24.04.x
|
||||
about: General issue for T-Pot 24.04.x
|
||||
title: ''
|
||||
labels: ''
|
||||
assignees: ''
|
||||
|
||||
---
|
||||
|
||||
# Successfully raise an issue
|
||||
Before you post your issue make sure it has not been answered yet and provide **⚠️ BASIC SUPPORT INFORMATION** (as requested below) if you come to the conclusion it is a new issue.
|
||||
|
||||
- 🔍 Use the [search function](https://github.com/telekom-security/tpotce/issues?utf8=%E2%9C%93&q=) first
|
||||
- 🧐 Check our [Config Examples & Tutorials](https://github.com/telekom-security/tpotce/discussions/categories/config-examples-tutorials) and the [discussions](https://github.com/telekom-security/tpotce/discussions) in general.
|
||||
- 📚 Consult the documentation of 💻 your Linux OS, 🐳 [Docker](https://docs.docker.com/), the 🦌 [Elastic stack](https://www.elastic.co/guide/index.html) and the 🍯 [T-Pot Readme](https://github.com/telekom-security/tpotce/blob/master/README.md).
|
||||
- ⚙️ The [Troubleshoot Section](https://github.com/telekom-security/tpotce?tab=readme-ov-file#troubleshooting) of the [T-Pot Readme](https://github.com/telekom-security/tpotce/blob/master/README.md) is a good starting point to collect a good set of information for the issue and / or to fix things on your own.
|
||||
- **⚠️ Provide [BASIC SUPPORT INFORMATION](#-basic-support-information-commands-are-expected-to-run-as-root) or similar detailed information with regard to your issue or we will close the issue or convert it into a discussion without further interaction from the maintainers**.<br>
|
||||
|
||||
# ⚠️ Basic support information (commands are expected to run as `root`)
|
||||
|
||||
**We happily take the time to improve T-Pot and take care of things, but we need you to take the time to create an issue that provides us with all the information we need.**
|
||||
|
||||
- What OS is your T-Pot running on?
|
||||
- What is the version of the OS `lsb_release -a` and `uname -a`?
|
||||
- What T-Pot version are you currently using (only **T-Pot 24.04.x** is currently supported)?
|
||||
- What architecture are you running on (i.e. hardware, cloud, VM, etc.)?
|
||||
- Review the `~/install_tpot.log`, attach the log and highlight the errors.
|
||||
- How long has your installation been running?
|
||||
- If it is a fresh install consult the documentation first.
|
||||
- Most likely it is a port conflict or a remote dependency was unavailable.
|
||||
- Retry a fresh installation and only open the issue if the error keeps coming up and is not resolved using the documentation as described [here](#how-to-raise-an-issue).
|
||||
- Did you install upgrades, packages or use the update script?
|
||||
- Did you modify any scripts or configs? If yes, please attach the changes.
|
||||
- Please provide a screenshot of `htop` and `docker stats`.
|
||||
- How much free disk space is available (`df -h`)?
|
||||
- What is the current container status (`dps`)?
|
||||
- On Linux: What is the status of the T-Pot service (`systemctl status tpot`)?
|
||||
- What ports are being occupied? Stop T-Pot `systemctl stop tpot` and run `grc netstat -tulpen`
|
||||
- Stop T-Pot `systemctl stop tpot`
|
||||
- Run `grc netstat -tulpen`
|
||||
- Run T-Pot manually with `docker compose -f ~/tpotce/docker-compose.yml up` and check for errors
|
||||
- Stop execution with `CTRL-C` and `docker compose -f ~/tpotce/docker-compose.yml down -v`
|
||||
- If a single container shows as `DOWN` you can run `docker logs <container-name>` for the latest log entries
|
||||
60
.github/workflows/basic-support-info.yml
vendored
|
|
@ -1,60 +0,0 @@
|
|||
name: "Check Basic Support Info"
|
||||
|
||||
on:
|
||||
issues:
|
||||
types: [opened, edited]
|
||||
|
||||
permissions:
|
||||
issues: write
|
||||
contents: read
|
||||
|
||||
jobs:
|
||||
check-issue:
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
steps:
|
||||
- name: Check out the repository
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Install jq
|
||||
run: sudo apt-get install jq -y
|
||||
|
||||
- name: Check for "investigate" label
|
||||
id: skip_investigate
|
||||
run: |
|
||||
LABELS=$(jq -r '.issue.labels[].name' "$GITHUB_EVENT_PATH")
|
||||
for label in $LABELS; do
|
||||
if [ "$label" = "investigate" ]; then
|
||||
echo "skip=true" >> $GITHUB_ENV
|
||||
exit 0
|
||||
fi
|
||||
done
|
||||
echo "skip=false" >> $GITHUB_ENV
|
||||
|
||||
- name: Check issue for basic support info
|
||||
if: env.skip != 'true'
|
||||
id: check_issue
|
||||
run: |
|
||||
REQUIRED_INFO=("What OS are you T-Pot running on?" "What is the version of the OS" "What T-Pot version are you currently using" "What architecture are you running on" "Review the \`~/install_tpot.log\`" "How long has your installation been running?" "Did you install upgrades, packages or use the update script?" "Did you modify any scripts or configs?" "Please provide a screenshot of \`htop\` and \`docker stats\`." "How much free disk space is available" "What is the current container status" "What is the status of the T-Pot service" "What ports are being occupied?")
|
||||
ISSUE_BODY=$(jq -r '.issue.body' "$GITHUB_EVENT_PATH")
|
||||
MISSING_INFO=()
|
||||
|
||||
for info in "${REQUIRED_INFO[@]}"; do
|
||||
if [[ "$ISSUE_BODY" != *"$info"* ]]; then
|
||||
MISSING_INFO+=("$info")
|
||||
fi
|
||||
done
|
||||
|
||||
if [ ${#MISSING_INFO[@]} -ne 0 ]; then
|
||||
echo "missing=true" >> $GITHUB_ENV
|
||||
else
|
||||
echo "missing=false" >> $GITHUB_ENV
|
||||
fi
|
||||
|
||||
- name: Add "no basic support info" label if necessary
|
||||
if: env.missing == 'true' && env.skip != 'true'
|
||||
run: gh issue edit "$NUMBER" --add-label "no basic support info"
|
||||
env:
|
||||
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
GH_REPO: ${{ github.repository }}
|
||||
NUMBER: ${{ github.event.issue.number }}
|
||||
21
.github/workflows/main.yml
vendored
|
|
@ -1,21 +0,0 @@
|
|||
name: Link Checker
|
||||
|
||||
on:
|
||||
schedule:
|
||||
- cron: '0 2 * * *' # daily at 2 AM UTC
|
||||
workflow_dispatch:
|
||||
|
||||
jobs:
|
||||
linkChecker:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- name: Run lychee link checker
|
||||
uses: lycheeverse/lychee-action@v2.0.2
|
||||
with:
|
||||
args: >
|
||||
--verbose
|
||||
--retry-wait-time 10
|
||||
--max-retries 3
|
||||
README.md
|
||||
24
.github/workflows/stale.yml
vendored
|
|
@ -1,24 +0,0 @@
|
|||
name: "Tag stale issues and pull requests"
|
||||
|
||||
on:
|
||||
schedule:
|
||||
- cron: "0 0 * * *" # Runs every day at midnight
|
||||
workflow_dispatch: # Allows the workflow to be triggered manually
|
||||
|
||||
jobs:
|
||||
stale:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/stale@v7
|
||||
with:
|
||||
repo-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
stale-issue-message: "This issue has been marked as stale because it has had no activity for 7 days. If you are still experiencing this issue, please comment or it will be closed in 7 days."
|
||||
stale-pr-message: "This pull request has been marked as stale because it has had no activity for 7 days. If you are still working on this, please comment or it will be closed in 7 days."
|
||||
days-before-stale: 7
|
||||
days-before-close: 7
|
||||
stale-issue-label: "stale"
|
||||
exempt-issue-labels: "keep-open"
|
||||
stale-pr-label: "stale"
|
||||
exempt-pr-labels: "keep-open"
|
||||
operations-per-run: 30
|
||||
debug-only: false
|
||||
6
.gitignore
vendored
|
|
@ -1,6 +0,0 @@
|
|||
# Ignore data folder
|
||||
data/
|
||||
_data/
|
||||
**/.DS_Store
|
||||
.idea
|
||||
install_tpot.log
|
||||
46
CHANGELOG.md
|
|
@ -1,46 +0,0 @@
|
|||
# Release Notes / Changelog
|
||||
T-Pot 24.04.1 brings significant updates and exciting new honeypot additions, especially the LLM-based honeypots **Beelzebub** and **Galah**!
|
||||
|
||||
## New Features
|
||||
* **Beelzebub** (SSH) and **Galah** (HTTP) are the first LLM-based honeypots included in T-Pot (requires Ollama installation or a ChatGPT subscription).
|
||||
* **Go-Pot**, an HTTP tarpit designed to maximize bot misery by slowly feeding them an infinite stream of fake secrets.
* **Honeyaml**, a configurable API server honeypot that even supports JWT-based HTTP bearer/token authentication.
* **H0neytr4p**, an HTTP/S honeypot capable of emulating vulnerabilities using configurable traps.
* **Miniprint**, a medium-interaction printer honeypot.
|
||||
|
||||
## Updates
|
||||
* **Honeypots** were updated to their latest pushed code and / or releases.
|
||||
* **Editions** have been re-introduced. You can now additionally choose to install T-Pot as **Mini**, **LLM** and **Tarpit** edition.
|
||||
* **Attack Map** has been updated to 2.2.6 including support for all new honeypots.
|
||||
* **Elastic Stack** has been upgraded to 8.16.1.
|
||||
* **Cyberchef** has been updated to the latest release.
|
||||
* **Elasticvue** has been updated to 1.1.0.
|
||||
* **Suricata** has been updated to 7.0.7, now supporting JA4 hashes.
|
||||
* Most honeypots now use **PyInstaller** (for Python) and **Scratch** (for Go) to minimize Docker image sizes.
|
||||
* All new honeypots have been integrated with **Kibana**, featuring dedicated dashboards and visualizations.
|
||||
* **Github Container Registry** is now the default container registry for the T-Pot configuration file `.env`.
|
||||
* Compatibility tested with **Alma 9.5**, **Fedora 41**, **Rocky 9.5**, and **Ubuntu 24.04.1**, with updated supported ISO links.
|
||||
* Docker images now use **Alpine 3.20** or **Scratch** wherever possible.
|
||||
* Updates for `24.04.1` images will be provided continuously through Docker image updates.
|
||||
* **Ddospot** has been moved from the Hive / Sensor installation to the Tarpit installation.
|
||||
|
||||
## Breaking Changes
|
||||
### NGINX
|
||||
- The container no longer runs in host mode, requiring changes to the `docker-compose.yml` and related services.
|
||||
- To avoid confusion and downtime, the `24.04.1` tag for Docker images has been introduced.
|
||||
- **Important**: Actively update T-Pot as described in the [README](https://github.com/telekom-security/tpotce/blob/master/README.md).
|
||||
- **Deprecation Notice**: The `24.04` tagged images will no longer be maintained and will be removed by **2025-01-31**.
|
||||
|
||||
### Suricata
|
||||
- Capture filters have been updated to exclude broadcast, multicast, NetBIOS, IGMP, and MDNS traffic.
|
||||
|
||||
## Thanks & Credits
|
||||
A heartfelt thank you to the contributors who made this release possible:
|
||||
* @elivlo, @mancasa, koalafiedTroll, @trixam, for their backend and ews support!
|
||||
* @mariocandela for his work and updates on Beelzebub based on our discussions!
|
||||
* @ryanolee for approaching us and adding valuable features to go-pot based on our discussions!
|
||||
* @neon-ninja for the work on #1661!
|
||||
* @sarkoziadam for the work on #1643!
|
||||
* @glaslos for the work on #1538!
|
||||
|
||||
… and to the entire T-Pot community for opening issues, sharing ideas, and helping improve T-Pot!
|
||||
43
CITATION.cff
|
|
@ -1,43 +0,0 @@
|
|||
# This CITATION.cff file was generated with cffinit.
|
||||
# Visit https://bit.ly/cffinit to generate yours today!
|
||||
|
||||
cff-version: 1.2.0
|
||||
title: T-Pot 24.04.1
|
||||
message: >-
|
||||
If you use this software, please cite it using the
|
||||
metadata from this file.
|
||||
type: software
|
||||
authors:
|
||||
- name: Deutsche Telekom Security GmbH
|
||||
address: Bonner Talweg 100
|
||||
city: Bonn
|
||||
country: DE
|
||||
post-code: '53113'
|
||||
website: 'https://github.com/telekom-security'
|
||||
- given-names: Marco
|
||||
family-names: Ochse
|
||||
affiliation: Deutsche Telekom Security GmbH
|
||||
identifiers:
|
||||
- type: url
|
||||
value: >-
|
||||
https://github.com/telekom-security/tpotce/releases/tag/24.04.1
|
||||
description: T-Pot Release 24.04.1
|
||||
repository-code: 'https://github.com/telekom-security/tpotce'
|
||||
abstract: >-
|
||||
T-Pot is the all-in-one, optionally distributed, multiarch
(amd64, arm64) honeypot platform, supporting 20+
|
||||
honeypots and countless visualization options using the
|
||||
Elastic Stack, animated live attack maps and lots of
|
||||
security tools to further improve the deception
|
||||
experience.
|
||||
keywords:
|
||||
- honeypot
|
||||
- deception
|
||||
- t-pot
|
||||
- telekom security
|
||||
- docker
|
||||
- elk
|
||||
license: GPL-3.0
|
||||
commit: release
|
||||
version: 24.04.1
|
||||
date-released: '2024-12-11'
|
||||
38
CONTRIBUTING.MD
Normal file
|
|
@ -0,0 +1,38 @@
|
|||
# Contribution
|
||||
|
||||
Thank you for your decision to contribute to T-Pot.
|
||||
|
||||
## Issues
|
||||
|
||||
Please feel free to post your problems, ideas and issues [here](https://github.com/dtag-dev-sec/tpotce/issues). We will try to answer ASAP, but to speed things up we encourage you to ...
|
||||
- [ ] Use the [search function](https://github.com/dtag-dev-sec/tpotce/issues?utf8=%E2%9C%93&q=) first
|
||||
- [ ] Check the [FAQ](#faq)
|
||||
- [ ] Provide [basic support information](#info) with regard to your issue
|
||||
|
||||
Thank you :smiley:
|
||||
|
||||
-
|
||||
|
||||
<a name="faq"></a>
|
||||
### FAQ
|
||||
|
||||
##### Where can I find the honeypot logs?
|
||||
###### The honeypot logs are located in `/data/`. You have to login via ssh and run `sudo su -` and then `cd /data/`. Do not change any permissions here or T-Pot will fail to work.
|
||||
|
||||
-
|
||||
|
||||
|
||||
<a name="info"></a>
|
||||
### Basic support information
|
||||
|
||||
- What T-Pot version are you currently using?
|
||||
- Are you running on an Intel NUC or a VM?
|
||||
- How long has your installation been running?
|
||||
- Did you install any upgrades or packages?
|
||||
- Did you modify any scripts?
|
||||
- Have you turned persistence on/off?
|
||||
- How much RAM is available (login via ssh and run `htop`)?
|
||||
- How much stress are the CPUs under (login via ssh and run `htop`)?
|
||||
- How much swap space is being used (login via ssh and run `htop`)?
|
||||
- How much free disk space is available (login via ssh and run `sudo df -h`)?
|
||||
- What is the current container status (login via ssh and run `sudo start.sh`)?
|
||||
23
SECURITY.md
|
|
@ -1,23 +0,0 @@
|
|||
# Security Policy
|
||||
|
||||
## Supported Versions
|
||||
|
||||
| Version | Supported |
|
||||
| ------- | ------------------ |
|
||||
| 24.04.1 | :white_check_mark: |
|
||||
|
||||
|
||||
## Reporting a Vulnerability
|
||||
|
||||
We prioritize the security of T-Pot highly. Often, vulnerabilities in T-Pot components stem from upstream dependencies, including honeypots, Docker images, tools, or packages. We are committed to working together to resolve any issues effectively.
|
||||
|
||||
Please follow these steps before reporting a potential vulnerability:
|
||||
|
||||
1. Verify that the behavior you've observed isn't already documented as a normal aspect or unrelated issue of T-Pot. For example, Cowrie may initiate outgoing connections, or T-Pot might open all possible TCP ports — a feature enabled by Honeytrap.
|
||||
2. Clearly identify which component is vulnerable (e.g., a specific honeypot, Docker image, tool, package) and isolate the issue.
|
||||
3. Provide a detailed description of the issue, including log and, if available, debug files. Include all steps necessary to reproduce the vulnerability. If you have a proposed solution, hotfix, or patch, please be prepared to submit a pull request (PR).
|
||||
4. Check whether the vulnerability is already known upstream. If there is an existing fix or patch, include that information in your report.
|
||||
|
||||
This approach ensures a thorough and efficient resolution process.
|
||||
|
||||
We aim to respond as quickly as possible. If you believe the issue poses an immediate threat to the entire T-Pot community, you can expedite the process by responsibly alerting our [CERT](https://www.telekom.com/en/corporate-responsibility/data-protection-data-security/security/details/introducing-deutsche-telekom-cert-358316).
|
||||
|
|
@ -1,181 +0,0 @@
|
|||
from datetime import datetime
|
||||
import yaml
|
||||
|
||||
version = \
|
||||
"""
|
||||
____ [T-Pot] _ ____ _ _ _
|
||||
/ ___| ___ _ ____ _(_) ___ ___ | __ ) _ _(_) | __| | ___ _ __
|
||||
\___ \ / _ \ '__\ \ / / |/ __/ _ \ | _ \| | | | | |/ _` |/ _ \ '__|
|
||||
___) | __/ | \ V /| | (_| __/ | |_) | |_| | | | (_| | __/ |
|
||||
|____/ \___|_| \_/ |_|\___\___| |____/ \__,_|_|_|\__,_|\___|_| v0.21
|
||||
|
||||
# This script is intended for users who want to build a customized docker-compose.yml for T-Pot.
|
||||
# T-Pot Service Builder will ask for all the docker services to be included in docker-compose.yml.
|
||||
# The configuration file will be checked for conflicting ports.
|
||||
# Port conflicts have to be resolved manually or re-running the script and excluding the conflicting services.
|
||||
# Review the resulting docker-compose-custom.yml and adjust to your needs by (un)commenting the corresponding lines in the config.
|
||||
"""
|
||||
|
||||
header = \
|
||||
"""# T-Pot: CUSTOM EDITION
|
||||
# Generated on: {current_date}
|
||||
"""
|
||||
|
||||
config_filename = "tpot_services.yml"
|
||||
service_filename = "docker-compose-custom.yml"
|
||||
|
||||
|
||||
def load_config(filename):
|
||||
try:
|
||||
with open(filename, 'r') as file:
|
||||
config = yaml.safe_load(file)
|
||||
except:
|
||||
print_color(f"Error: {filename} not found. Exiting.", "red")
|
||||
exit()
|
||||
return config
|
||||
|
||||
|
||||
def prompt_service_include(service_name):
|
||||
while True:
|
||||
try:
|
||||
response = input(f"Include {service_name}? (y/n): ").strip().lower()
|
||||
if response in ['y', 'n']:
|
||||
return response == 'y'
|
||||
else:
|
||||
print_color("Please enter 'y' for yes or 'n' for no.", "red")
|
||||
except KeyboardInterrupt:
|
||||
print()
|
||||
print_color("Interrupted by user. Exiting.", "red")
|
||||
print()
|
||||
exit()
|
||||
|
||||
|
||||
def check_port_conflicts(selected_services):
|
||||
all_ports = {}
|
||||
conflict_ports = []
|
||||
|
||||
for service_name, config in selected_services.items():
|
||||
ports = config.get('ports', [])
|
||||
for port in ports:
|
||||
# Split the port mapping and take only the host port part
|
||||
parts = port.split(':')
|
||||
host_port = parts[1] if len(parts) == 3 else (parts[0] if parts[1].isdigit() else parts[1])
|
||||
|
||||
# Check for port conflict and associate it with the service name
|
||||
if host_port in all_ports:
|
||||
conflict_ports.append((service_name, host_port))
|
||||
if all_ports[host_port] not in [service for service, _ in conflict_ports]:
|
||||
conflict_ports.append((all_ports[host_port], host_port))
|
||||
else:
|
||||
all_ports[host_port] = service_name
|
||||
|
||||
if conflict_ports:
|
||||
print_color("[WARNING] - Port conflict(s) detected:", "red")
|
||||
for service, port in conflict_ports:
|
||||
print_color(f"{service}: {port}", "red")
|
||||
return True
|
||||
return False
|
||||
|
||||
|
||||
|
||||
def print_color(text, color):
|
||||
colors = {
|
||||
"red": "\033[91m",
|
||||
"green": "\033[92m",
|
||||
"blue": "\033[94m", # Added blue
|
||||
"magenta": "\033[95m", # Added magenta
|
||||
"end": "\033[0m",
|
||||
}
|
||||
print(f"{colors[color]}{text}{colors['end']}")
|
||||
|
||||
def enforce_dependencies(selected_services, services):
|
||||
# If snare or any tanner services are selected, ensure all are enabled
|
||||
tanner_services = {'snare', 'tanner', 'tanner_redis', 'tanner_phpox', 'tanner_api'}
|
||||
if tanner_services.intersection(selected_services):
|
||||
print_color("[OK] - For Snare / Tanner to work all required services have been added to your configuration.", "green")
|
||||
for service in tanner_services:
|
||||
selected_services[service] = services[service]
|
||||
|
||||
# If kibana is enabled, also enable elasticsearch
|
||||
if 'kibana' in selected_services:
|
||||
selected_services['elasticsearch'] = services['elasticsearch']
|
||||
print_color("[OK] - Kibana requires Elasticsearch which has been added to your configuration.", "green")
|
||||
|
||||
# If spiderfoot is enabled, also enable nginx
|
||||
if 'spiderfoot' in selected_services:
|
||||
selected_services['nginx'] = services['nginx']
|
||||
print_color("[OK] - Spiderfoot requires Nginx which has been added to your configuration.","green")
|
||||
|
||||
|
||||
# If any map services are detected, enable logstash, elasticsearch, nginx, and all map services
|
||||
map_services = {'map_web', 'map_redis', 'map_data'}
|
||||
if map_services.intersection(selected_services):
|
||||
print_color("[OK] - For AttackMap to work all required services have been added to your configuration.", "green")
|
||||
for service in map_services.union({'elasticsearch', 'nginx'}):
|
||||
selected_services[service] = services[service]
|
||||
|
||||
# honeytrap and glutton cannot be active at the same time, always vote in favor of honeytrap
|
||||
if 'honeytrap' in selected_services and 'glutton' in selected_services:
|
||||
# Remove glutton and notify
|
||||
del selected_services['glutton']
|
||||
print_color("[OK] - Honeytrap and Glutton cannot be active at the same time. Glutton has been removed from your configuration.","green")
|
||||
|
||||
|
||||
def remove_unused_networks(selected_services, services, networks):
|
||||
used_networks = set()
|
||||
# Identify networks used by selected services
|
||||
for service_name in selected_services:
|
||||
service_config = services[service_name]
|
||||
if 'networks' in service_config:
|
||||
for network in service_config['networks']:
|
||||
used_networks.add(network)
|
||||
|
||||
# Remove unused networks
|
||||
for network in list(networks):
|
||||
if network not in used_networks:
|
||||
del networks[network]
|
||||
|
||||
|
||||
def main():
|
||||
config = load_config(config_filename)
|
||||
|
||||
# Separate services and networks
|
||||
services = config['services']
|
||||
networks = config.get('networks', {})
|
||||
selected_services = {'tpotinit': services['tpotinit'],
|
||||
'logstash': services['logstash']} # Always include tpotinit and logstash
|
||||
|
||||
for service_name, service_config in services.items():
|
||||
if service_name not in selected_services: # Skip already included services
|
||||
if prompt_service_include(service_name):
|
||||
selected_services[service_name] = service_config
|
||||
|
||||
# Enforce dependencies
|
||||
enforce_dependencies(selected_services, services)
|
||||
|
||||
# Remove unused networks based on selected services
|
||||
remove_unused_networks(selected_services, services, networks)
|
||||
|
||||
output_config = {
|
||||
'networks': networks,
|
||||
'services': selected_services,
|
||||
}
|
||||
|
||||
current_date = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
|
||||
|
||||
with open(service_filename, 'w') as file:
|
||||
file.write(header.format(current_date=current_date))
|
||||
yaml.dump(output_config, file, default_flow_style=False, sort_keys=False, indent=2)
|
||||
|
||||
if check_port_conflicts(selected_services):
|
||||
print_color(f"[WARNING] - Adjust the conflicting ports in the {service_filename} or re-run the script and select services that do not occupy the same port(s).",
|
||||
"red")
|
||||
else:
|
||||
print_color(f"[OK] - Custom {service_filename} has been generated without port conflicts.", "green")
|
||||
print_color(f"Copy {service_filename} to ~/tpotce and test with: docker compose -f {service_filename} up", "blue")
|
||||
print_color(f"If everything works, exit with CTRL-C and replace docker-compose.yml with the new config.", "blue")
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
print_color(version, "magenta")
|
||||
main()
|
||||
350
compose/llm.yml
|
|
@ -1,350 +0,0 @@
|
|||
# T-Pot: LLM
|
||||
networks:
|
||||
beelzebub_local:
|
||||
galah_local:
|
||||
nginx_local:
|
||||
ewsposter_local:
|
||||
|
||||
services:
|
||||
|
||||
#########################################
|
||||
#### DEV
|
||||
#########################################
|
||||
#### T-Pot Init - Never delete this!
|
||||
#########################################
|
||||
|
||||
# T-Pot Init Service
|
||||
tpotinit:
|
||||
container_name: tpotinit
|
||||
env_file:
|
||||
- .env
|
||||
restart: always
|
||||
stop_grace_period: 60s
|
||||
tmpfs:
|
||||
- /tmp/etc:uid=2000,gid=2000
|
||||
- /tmp/:uid=2000,gid=2000
|
||||
network_mode: "host"
|
||||
cap_add:
|
||||
- NET_ADMIN
|
||||
image: ${TPOT_REPO}/tpotinit:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
volumes:
|
||||
- ${TPOT_DOCKER_COMPOSE}:/tmp/tpot/docker-compose.yml:ro
|
||||
- ${TPOT_DATA_PATH}/blackhole:/etc/blackhole
|
||||
- ${TPOT_DATA_PATH}:/data
|
||||
- /var/run/docker.sock:/var/run/docker.sock:ro
|
||||
|
||||
|
||||
##################
|
||||
#### Honeypots
|
||||
##################
|
||||
|
||||
# Beelzebub service
|
||||
beelzebub:
|
||||
container_name: beelzebub
|
||||
restart: always
|
||||
depends_on:
|
||||
tpotinit:
|
||||
condition: service_healthy
|
||||
networks:
|
||||
- beelzebub_local
|
||||
ports:
|
||||
- "22:22"
|
||||
# - "80:80"
|
||||
# - "2222:2222"
|
||||
# - "3306:3306"
|
||||
# - "8080:8080"
|
||||
image: ${TPOT_REPO}/beelzebub:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
environment:
|
||||
LLM_MODEL: ${BEELZEBUB_LLM_MODEL}
|
||||
LLM_HOST: ${BEELZEBUB_LLM_HOST}
|
||||
OLLAMA_MODEL: ${BEELZEBUB_OLLAMA_MODEL}
|
||||
read_only: true
|
||||
volumes:
|
||||
- ${TPOT_DATA_PATH}/beelzebub/key:/opt/beelzebub/configurations/key
|
||||
- ${TPOT_DATA_PATH}/beelzebub/log:/opt/beelzebub/configurations/log
|
||||
|
||||
# Galah service
|
||||
galah:
|
||||
container_name: galah
|
||||
restart: always
|
||||
depends_on:
|
||||
tpotinit:
|
||||
condition: service_healthy
|
||||
networks:
|
||||
- galah_local
|
||||
ports:
|
||||
- "80:80"
|
||||
- "443:443"
|
||||
- "8443:8443"
|
||||
- "8080:8080"
|
||||
image: ${TPOT_REPO}/galah:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
environment:
|
||||
LLM_PROVIDER: ${GALAH_LLM_PROVIDER}
|
||||
LLM_SERVER_URL: ${GALAH_LLM_SERVER_URL}
|
||||
LLM_MODEL: ${GALAH_LLM_MODEL}
|
||||
# LLM_TEMPERATURE: ${GALAH_LLM_TEMPERATURE}
|
||||
# LLM_API_KEY: ${GALAH_LLM_API_KEY}
|
||||
# LLM_CLOUD_LOCATION: ${GALAH_LLM_CLOUD_LOCATION}
|
||||
# LLM_CLOUD_PROJECT: ${GALAH_LLM_CLOUD_PROJECT}
|
||||
read_only: true
|
||||
volumes:
|
||||
- ${TPOT_DATA_PATH}/galah/cache:/opt/galah/config/cache
|
||||
- ${TPOT_DATA_PATH}/galah/cert:/opt/galah/config/cert
|
||||
- ${TPOT_DATA_PATH}/galah/log:/opt/galah/log
|
||||
|
||||
|
||||
##################
|
||||
#### NSM
|
||||
##################
|
||||
|
||||
# Fatt service
|
||||
fatt:
|
||||
container_name: fatt
|
||||
restart: always
|
||||
depends_on:
|
||||
tpotinit:
|
||||
condition: service_healthy
|
||||
network_mode: "host"
|
||||
cap_add:
|
||||
- NET_ADMIN
|
||||
- SYS_NICE
|
||||
- NET_RAW
|
||||
image: ${TPOT_REPO}/fatt:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
volumes:
|
||||
- ${TPOT_DATA_PATH}/fatt/log:/opt/fatt/log
|
||||
|
||||
# P0f service
|
||||
p0f:
|
||||
container_name: p0f
|
||||
restart: always
|
||||
depends_on:
|
||||
tpotinit:
|
||||
condition: service_healthy
|
||||
network_mode: "host"
|
||||
image: ${TPOT_REPO}/p0f:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
read_only: true
|
||||
volumes:
|
||||
- ${TPOT_DATA_PATH}/p0f/log:/var/log/p0f
|
||||
|
||||
# Suricata service
|
||||
suricata:
|
||||
container_name: suricata
|
||||
restart: always
|
||||
depends_on:
|
||||
tpotinit:
|
||||
condition: service_healthy
|
||||
environment:
|
||||
- OINKCODE=${OINKCODE:-OPEN} # Default to OPEN if unset or NULL (value provided by T-Pot .env)
|
||||
# Loading external Rules from URL
|
||||
# - FROMURL="https://username:password@yoururl.com|https://username:password@otherurl.com"
|
||||
network_mode: "host"
|
||||
cap_add:
|
||||
- NET_ADMIN
|
||||
- SYS_NICE
|
||||
- NET_RAW
|
||||
image: ${TPOT_REPO}/suricata:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
volumes:
|
||||
- ${TPOT_DATA_PATH}/suricata/log:/var/log/suricata
|
||||
|
||||
|
||||
##################
|
||||
#### Tools
|
||||
##################
|
||||
|
||||
#### ELK
|
||||
## Elasticsearch service
|
||||
elasticsearch:
|
||||
container_name: elasticsearch
|
||||
restart: always
|
||||
depends_on:
|
||||
tpotinit:
|
||||
condition: service_healthy
|
||||
networks:
|
||||
- nginx_local
|
||||
environment:
|
||||
- bootstrap.memory_lock=true
|
||||
- ES_JAVA_OPTS=-Xms2048m -Xmx2048m
|
||||
- ES_TMPDIR=/tmp
|
||||
cap_add:
|
||||
- IPC_LOCK
|
||||
ulimits:
|
||||
memlock:
|
||||
soft: -1
|
||||
hard: -1
|
||||
nofile:
|
||||
soft: 65536
|
||||
hard: 65536
|
||||
mem_limit: 4g
|
||||
ports:
|
||||
- "127.0.0.1:64298:9200"
|
||||
image: ${TPOT_REPO}/elasticsearch:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
volumes:
|
||||
- ${TPOT_DATA_PATH}:/data
|
||||
|
||||
## Kibana service
|
||||
kibana:
|
||||
container_name: kibana
|
||||
restart: always
|
||||
depends_on:
|
||||
elasticsearch:
|
||||
condition: service_healthy
|
||||
networks:
|
||||
- nginx_local
|
||||
mem_limit: 1g
|
||||
ports:
|
||||
- "127.0.0.1:64296:5601"
|
||||
image: ${TPOT_REPO}/kibana:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
|
||||
## Logstash service
|
||||
logstash:
|
||||
container_name: logstash
|
||||
restart: always
|
||||
depends_on:
|
||||
elasticsearch:
|
||||
condition: service_healthy
|
||||
networks:
|
||||
- nginx_local
|
||||
environment:
|
||||
- LS_JAVA_OPTS=-Xms1024m -Xmx1024m
|
||||
- TPOT_TYPE=${TPOT_TYPE:-HIVE}
|
||||
- TPOT_HIVE_USER=${TPOT_HIVE_USER}
|
||||
- TPOT_HIVE_IP=${TPOT_HIVE_IP}
|
||||
- LS_SSL_VERIFICATION=${LS_SSL_VERIFICATION:-full}
|
||||
ports:
|
||||
- "127.0.0.1:64305:64305"
|
||||
mem_limit: 2g
|
||||
image: ${TPOT_REPO}/logstash:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
volumes:
|
||||
- ${TPOT_DATA_PATH}:/data
|
||||
|
||||
## Map Redis Service
|
||||
map_redis:
|
||||
container_name: map_redis
|
||||
restart: always
|
||||
depends_on:
|
||||
tpotinit:
|
||||
condition: service_healthy
|
||||
networks:
|
||||
- nginx_local
|
||||
stop_signal: SIGKILL
|
||||
tty: true
|
||||
image: ${TPOT_REPO}/redis:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
read_only: true
|
||||
|
||||
## Map Web Service
|
||||
map_web:
|
||||
container_name: map_web
|
||||
restart: always
|
||||
depends_on:
|
||||
tpotinit:
|
||||
condition: service_healthy
|
||||
networks:
|
||||
- nginx_local
|
||||
environment:
|
||||
- MAP_COMMAND=AttackMapServer.py
|
||||
stop_signal: SIGKILL
|
||||
tty: true
|
||||
ports:
|
||||
- "127.0.0.1:64299:64299"
|
||||
image: ${TPOT_REPO}/map:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
|
||||
## Map Data Service
|
||||
map_data:
|
||||
container_name: map_data
|
||||
restart: always
|
||||
depends_on:
|
||||
elasticsearch:
|
||||
condition: service_healthy
|
||||
networks:
|
||||
- nginx_local
|
||||
environment:
|
||||
- MAP_COMMAND=DataServer_v2.py
|
||||
- TPOT_ATTACKMAP_TEXT=${TPOT_ATTACKMAP_TEXT}
|
||||
- TZ=${TPOT_ATTACKMAP_TEXT_TIMEZONE}
|
||||
stop_signal: SIGKILL
|
||||
tty: true
|
||||
image: ${TPOT_REPO}/map:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
#### /ELK
|
||||
|
||||
# Ewsposter service
|
||||
ewsposter:
|
||||
container_name: ewsposter
|
||||
restart: always
|
||||
depends_on:
|
||||
tpotinit:
|
||||
condition: service_healthy
|
||||
networks:
|
||||
- ewsposter_local
|
||||
environment:
|
||||
- EWS_HPFEEDS_ENABLE=false
|
||||
- EWS_HPFEEDS_HOST=host
|
||||
- EWS_HPFEEDS_PORT=port
|
||||
- EWS_HPFEEDS_CHANNELS=channels
|
||||
- EWS_HPFEEDS_IDENT=user
|
||||
- EWS_HPFEEDS_SECRET=secret
|
||||
- EWS_HPFEEDS_TLSCERT=false
|
||||
- EWS_HPFEEDS_FORMAT=json
|
||||
image: ${TPOT_REPO}/ewsposter:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
volumes:
|
||||
- ${TPOT_DATA_PATH}:/data
|
||||
- ${TPOT_DATA_PATH}/ews/conf/ews.ip:/opt/ewsposter/ews.ip
|
||||
|
||||
# Nginx service
|
||||
nginx:
|
||||
container_name: nginx
|
||||
restart: always
|
||||
environment:
|
||||
- TPOT_OSTYPE=${TPOT_OSTYPE}
|
||||
depends_on:
|
||||
tpotinit:
|
||||
condition: service_healthy
|
||||
tmpfs:
|
||||
- /var/tmp/nginx/client_body
|
||||
- /var/tmp/nginx/proxy
|
||||
- /var/tmp/nginx/fastcgi
|
||||
- /var/tmp/nginx/uwsgi
|
||||
- /var/tmp/nginx/scgi
|
||||
- /run
|
||||
- /var/lib/nginx/tmp:uid=100,gid=82
|
||||
networks:
|
||||
- nginx_local
|
||||
ports:
|
||||
- "64297:64297"
|
||||
- "64294:64294"
|
||||
image: ${TPOT_REPO}/nginx:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
read_only: true
|
||||
volumes:
|
||||
- ${TPOT_DATA_PATH}/nginx/cert/:/etc/nginx/cert/:ro
|
||||
- ${TPOT_DATA_PATH}/nginx/conf/nginxpasswd:/etc/nginx/nginxpasswd:ro
|
||||
- ${TPOT_DATA_PATH}/nginx/conf/lswebpasswd:/etc/nginx/lswebpasswd:ro
|
||||
- ${TPOT_DATA_PATH}/nginx/log/:/var/log/nginx/
|
||||
|
||||
# Spiderfoot service
|
||||
spiderfoot:
|
||||
container_name: spiderfoot
|
||||
restart: always
|
||||
depends_on:
|
||||
tpotinit:
|
||||
condition: service_healthy
|
||||
networks:
|
||||
- nginx_local
|
||||
ports:
|
||||
- "127.0.0.1:64303:8080"
|
||||
image: ${TPOT_REPO}/spiderfoot:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
volumes:
|
||||
- ${TPOT_DATA_PATH}/spiderfoot:/home/spiderfoot/.spiderfoot
|
||||
|
|
@ -1,734 +0,0 @@
|
|||
# T-Pot: MAC_WIN
|
||||
networks:
|
||||
tpotinit_local:
|
||||
adbhoney_local:
|
||||
ciscoasa_local:
|
||||
cowrie_local:
|
||||
dicompot_local:
|
||||
dionaea_local:
|
||||
elasticpot_local:
|
||||
h0neytr4p_local:
|
||||
heralding_local:
|
||||
honeyaml_local:
|
||||
ipphoney_local:
|
||||
mailoney_local:
|
||||
medpot_local:
|
||||
miniprint_local:
|
||||
redishoneypot_local:
|
||||
sentrypeer_local:
|
||||
suricata_local:
|
||||
tanner_local:
|
||||
wordpot_local:
|
||||
nginx_local:
|
||||
ewsposter_local:
|
||||
|
||||
services:
|
||||
|
||||
########################################
|
||||
#### DEV
|
||||
########################################
|
||||
#### T-Pot Init - Never delete this!
|
||||
########################################
|
||||
|
||||
# T-Pot Init Service
|
||||
tpotinit:
|
||||
container_name: tpotinit
|
||||
env_file:
|
||||
- .env
|
||||
restart: always
|
||||
stop_grace_period: 60s
|
||||
tmpfs:
|
||||
- /tmp/etc:uid=2000,gid=2000
|
||||
- /tmp/:uid=2000,gid=2000
|
||||
networks:
|
||||
- tpotinit_local
|
||||
image: ${TPOT_REPO}/tpotinit:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
volumes:
|
||||
- ${TPOT_DOCKER_COMPOSE}:/tmp/tpot/docker-compose.yml:ro
|
||||
- ${TPOT_DATA_PATH}/blackhole:/etc/blackhole
|
||||
- ${TPOT_DATA_PATH}:/data
|
||||
- /var/run/docker.sock:/var/run/docker.sock:ro
|
||||
|
||||
|
||||
##################
|
||||
#### Honeypots
|
||||
##################
|
||||
|
||||
# Adbhoney service
|
||||
adbhoney:
|
||||
container_name: adbhoney
|
||||
restart: always
|
||||
depends_on:
|
||||
tpotinit:
|
||||
condition: service_healthy
|
||||
networks:
|
||||
- adbhoney_local
|
||||
ports:
|
||||
- "5555:5555"
|
||||
image: ${TPOT_REPO}/adbhoney:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
read_only: true
|
||||
volumes:
|
||||
- ${TPOT_DATA_PATH}/adbhoney/log:/opt/adbhoney/log
|
||||
- ${TPOT_DATA_PATH}/adbhoney/downloads:/opt/adbhoney/dl
|
||||
|
||||
# Ciscoasa service
|
||||
ciscoasa:
|
||||
container_name: ciscoasa
|
||||
restart: always
|
||||
depends_on:
|
||||
tpotinit:
|
||||
condition: service_healthy
|
||||
tmpfs:
|
||||
- /tmp/ciscoasa:uid=2000,gid=2000
|
||||
networks:
|
||||
- ciscoasa_local
|
||||
ports:
|
||||
- "5000:5000/udp"
|
||||
- "8443:8443"
|
||||
image: ${TPOT_REPO}/ciscoasa:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
read_only: true
|
||||
volumes:
|
||||
- ${TPOT_DATA_PATH}/ciscoasa/log:/var/log/ciscoasa
|
||||
|
||||
# Cowrie service
|
||||
cowrie:
|
||||
container_name: cowrie
|
||||
restart: always
|
||||
depends_on:
|
||||
tpotinit:
|
||||
condition: service_healthy
|
||||
tmpfs:
|
||||
- /tmp/cowrie:uid=2000,gid=2000
|
||||
- /tmp/cowrie/data:uid=2000,gid=2000
|
||||
networks:
|
||||
- cowrie_local
|
||||
ports:
|
||||
- "22:22"
|
||||
- "23:23"
|
||||
image: ${TPOT_REPO}/cowrie:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
read_only: true
|
||||
volumes:
|
||||
- ${TPOT_DATA_PATH}/cowrie/downloads:/home/cowrie/cowrie/dl
|
||||
- ${TPOT_DATA_PATH}/cowrie/keys:/home/cowrie/cowrie/etc
|
||||
- ${TPOT_DATA_PATH}/cowrie/log:/home/cowrie/cowrie/log
|
||||
- ${TPOT_DATA_PATH}/cowrie/log/tty:/home/cowrie/cowrie/log/tty
|
||||
|
||||
# Dicompot service
|
||||
# Get the Horos Client for testing: https://horosproject.org/
|
||||
# Get Dicom images (CC BY 3.0): https://www.cancerimagingarchive.net/collections/
|
||||
# Put images (which must be in Dicom DCM format or it will not work!) into /data/dicompot/images
|
||||
dicompot:
|
||||
container_name: dicompot
|
||||
restart: always
|
||||
depends_on:
|
||||
tpotinit:
|
||||
condition: service_healthy
|
||||
networks:
|
||||
- dicompot_local
|
||||
ports:
|
||||
- "104:11112"
|
||||
- "11112:11112"
|
||||
image: ${TPOT_REPO}/dicompot:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
read_only: true
|
||||
volumes:
|
||||
- ${TPOT_DATA_PATH}/dicompot/log:/var/log/dicompot
|
||||
# - ${TPOT_DATA_PATH}/dicompot/images:/opt/dicompot/images
|
||||
|
||||
  # Dionaea service
  dionaea:
    container_name: dionaea
    stdin_open: true
    tty: true
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    networks:
      - dionaea_local
    ports:
      - "20:20"
      - "21:21"
      - "42:42"
      - "69:69/udp"
      - "81:81"
      - "135:135"
      # - "443:443"
      # - "445:445"
      - "1433:1433"
      - "1723:1723"
      - "1883:1883"
      - "3306:3306"
      # - "5060:5060"
      # - "5060:5060/udp"
      # - "5061:5061"
      - "27017:27017"
    image: ${TPOT_REPO}/dionaea:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/dionaea/roots/ftp:/opt/dionaea/var/dionaea/roots/ftp
      - ${TPOT_DATA_PATH}/dionaea/roots/tftp:/opt/dionaea/var/dionaea/roots/tftp
      - ${TPOT_DATA_PATH}/dionaea/roots/www:/opt/dionaea/var/dionaea/roots/www
      - ${TPOT_DATA_PATH}/dionaea/roots/upnp:/opt/dionaea/var/dionaea/roots/upnp
      - ${TPOT_DATA_PATH}/dionaea:/opt/dionaea/var/dionaea
      - ${TPOT_DATA_PATH}/dionaea/binaries:/opt/dionaea/var/dionaea/binaries
      - ${TPOT_DATA_PATH}/dionaea/log:/opt/dionaea/var/log
      - ${TPOT_DATA_PATH}/dionaea/rtp:/opt/dionaea/var/dionaea/rtp

  # ElasticPot service
  elasticpot:
    container_name: elasticpot
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    networks:
      - elasticpot_local
    ports:
      - "9200:9200"
    image: ${TPOT_REPO}/elasticpot:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/elasticpot/log:/opt/elasticpot/log

  # H0neytr4p service
  h0neytr4p:
    container_name: h0neytr4p
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    networks:
      - h0neytr4p_local
    ports:
      - "443:443"
      # - "80:80"
    image: ${TPOT_REPO}/h0neytr4p:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/h0neytr4p/log/:/opt/h0neytr4p/log/
      - ${TPOT_DATA_PATH}/h0neytr4p/payloads/:/data/h0neytr4p/payloads/

  # Heralding service
  heralding:
    container_name: heralding
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    tmpfs:
      - /tmp/heralding:uid=2000,gid=2000
    networks:
      - heralding_local
    ports:
      # - "21:21"
      # - "22:22"
      # - "23:23"
      # - "25:25"
      # - "80:80"
      - "110:110"
      - "143:143"
      # - "443:443"
      - "465:465"
      - "993:993"
      - "995:995"
      # - "3306:3306"
      # - "3389:3389"
      - "1080:1080"
      - "5432:5432"
      - "5900:5900"
    image: ${TPOT_REPO}/heralding:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/heralding/log:/var/log/heralding

  # Honeyaml service
  honeyaml:
    container_name: honeyaml
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    networks:
      - honeyaml_local
    ports:
      - "3000:8080"
    image: ${TPOT_REPO}/honeyaml:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/honeyaml/log:/opt/honeyaml/log/

  # Ipphoney service
  ipphoney:
    container_name: ipphoney
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    networks:
      - ipphoney_local
    ports:
      - "631:631"
    image: ${TPOT_REPO}/ipphoney:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/ipphoney/log:/opt/ipphoney/log

  # Mailoney service
  mailoney:
    container_name: mailoney
    stdin_open: true
    tty: true
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    networks:
      - mailoney_local
    ports:
      - "25:25"
      - "587:25"
    image: ${TPOT_REPO}/mailoney:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/mailoney/log:/opt/mailoney/logs

  # Medpot service
  medpot:
    container_name: medpot
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    networks:
      - medpot_local
    ports:
      - "2575:2575"
    image: ${TPOT_REPO}/medpot:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/medpot/log/:/var/log/medpot

  # Miniprint service
  miniprint:
    container_name: miniprint
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    networks:
      - miniprint_local
    ports:
      - "9100:9100"
    image: ${TPOT_REPO}/miniprint:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/miniprint/log/:/opt/miniprint/log/
      - ${TPOT_DATA_PATH}/miniprint/uploads/:/opt/miniprint/uploads/

  # Redishoneypot service
  redishoneypot:
    container_name: redishoneypot
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    networks:
      - redishoneypot_local
    ports:
      - "6379:6379"
    image: ${TPOT_REPO}/redishoneypot:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/redishoneypot/log:/var/log/redishoneypot

  # SentryPeer service
  sentrypeer:
    container_name: sentrypeer
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    # environment:
    # - SENTRYPEER_PEER_TO_PEER=1
    networks:
      - sentrypeer_local
    ports:
      # - "4222:4222/udp"
      - "5060:5060/tcp"
      - "5060:5060/udp"
      # - "127.0.0.1:8082:8082"
    image: ${TPOT_REPO}/sentrypeer:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/sentrypeer/log:/var/log/sentrypeer

  #### Snare / Tanner
  ## Tanner Redis Service
  tanner_redis:
    container_name: tanner_redis
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    tty: true
    networks:
      - tanner_local
    image: ${TPOT_REPO}/redis:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true

  ## PHP Sandbox service
  tanner_phpox:
    container_name: tanner_phpox
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    tty: true
    networks:
      - tanner_local
    image: ${TPOT_REPO}/phpox:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true

  ## Tanner API Service
  tanner_api:
    container_name: tanner_api
    restart: always
    depends_on:
      - tanner_redis
    tmpfs:
      - /tmp/tanner:uid=2000,gid=2000
    tty: true
    networks:
      - tanner_local
    image: ${TPOT_REPO}/tanner:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/tanner/log:/var/log/tanner
    command: tannerapi

  ## Tanner Service
  tanner:
    container_name: tanner
    restart: always
    depends_on:
      - tanner_api
      - tanner_phpox
    tmpfs:
      - /tmp/tanner:uid=2000,gid=2000
    tty: true
    networks:
      - tanner_local
    image: ${TPOT_REPO}/tanner:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    command: tanner
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/tanner/log:/var/log/tanner
      - ${TPOT_DATA_PATH}/tanner/files:/opt/tanner/files

  ## Snare Service
  snare:
    container_name: snare
    restart: always
    depends_on:
      - tanner
    tty: true
    networks:
      - tanner_local
    ports:
      - "80:80"
    image: ${TPOT_REPO}/snare:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}

  # Wordpot service
  wordpot:
    container_name: wordpot
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    networks:
      - wordpot_local
    ports:
      - "8080:80"
    image: ${TPOT_REPO}/wordpot:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/wordpot/log:/opt/wordpot/logs/


  ##################
  #### NSM
  ##################

  # Fatt service
  fatt:
    container_name: fatt
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    network_mode: "host"
    cap_add:
      - NET_ADMIN
      - SYS_NICE
      - NET_RAW
    image: ${TPOT_REPO}/fatt:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    volumes:
      - ${TPOT_DATA_PATH}/fatt/log:/opt/fatt/log

  # P0f service
  p0f:
    container_name: p0f
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    network_mode: "host"
    cap_add:
      - NET_ADMIN
      - SYS_NICE
      - NET_RAW
    image: ${TPOT_REPO}/p0f:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/p0f/log:/var/log/p0f

  # Suricata service
  suricata:
    container_name: suricata
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    networks:
      - suricata_local
    cap_add:
      - NET_ADMIN
      - SYS_NICE
      - NET_RAW
    environment:
      - OINKCODE=${OINKCODE:-OPEN} # Default to OPEN if unset or NULL (value provided by T-Pot .env)
      # Loading external Rules from URL
      # - FROMURL="https://username:password@yoururl.com|https://username:password@otherurl.com"
    image: ${TPOT_REPO}/suricata:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    volumes:
      - ${TPOT_DATA_PATH}/suricata/log:/var/log/suricata

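  # A hedged note (not part of the original compose file): OINKCODE is read from the T-Pot .env file and
  # falls back to the open ET ruleset. Assuming you hold a subscription code, it would be set there rather
  # than here, e.g.:
  #
  #   OINKCODE=<your-oinkcode>
  #
  # The commented FROMURL variable above shows the expected "|"-separated format for pulling rule files
  # from custom URLs instead.
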
  ##################
  #### Tools
  ##################

  #### ELK
  ## Elasticsearch service
  elasticsearch:
    container_name: elasticsearch
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    environment:
      - bootstrap.memory_lock=true
      - ES_JAVA_OPTS=-Xms2048m -Xmx2048m
      - ES_TMPDIR=/tmp
    networks:
      - nginx_local
    cap_add:
      - IPC_LOCK
    ulimits:
      memlock:
        soft: -1
        hard: -1
      nofile:
        soft: 65536
        hard: 65536
    mem_limit: 4g
    ports:
      - "127.0.0.1:64298:9200"
    image: ${TPOT_REPO}/elasticsearch:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    volumes:
      - ${TPOT_DATA_PATH}:/data

  ## Kibana service
  kibana:
    container_name: kibana
    restart: always
    depends_on:
      elasticsearch:
        condition: service_healthy
    networks:
      - nginx_local
    mem_limit: 1g
    ports:
      - "127.0.0.1:64296:5601"
    image: ${TPOT_REPO}/kibana:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}

  ## Logstash service
  logstash:
    container_name: logstash
    restart: always
    depends_on:
      elasticsearch:
        condition: service_healthy
    networks:
      - nginx_local
    environment:
      - LS_JAVA_OPTS=-Xms1024m -Xmx1024m
      - TPOT_TYPE=${TPOT_TYPE:-HIVE}
      - TPOT_HIVE_USER=${TPOT_HIVE_USER}
      - TPOT_HIVE_IP=${TPOT_HIVE_IP}
      - LS_SSL_VERIFICATION=${LS_SSL_VERIFICATION:-full}
    ports:
      - "127.0.0.1:64305:64305"
    mem_limit: 2g
    image: ${TPOT_REPO}/logstash:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    volumes:
      - ${TPOT_DATA_PATH}:/data

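  # A hedged sketch (not part of the original compose file): TPOT_TYPE, TPOT_HIVE_USER, TPOT_HIVE_IP and
  # LS_SSL_VERIFICATION are supplied by the T-Pot .env file; on a sensor that forwards its logs to a
  # central hive they might look like this (placeholder values, adjust to your deployment):
  #
  #   TPOT_TYPE=SENSOR
  #   TPOT_HIVE_USER=<credentials for the hive's Logstash web user>
  #   TPOT_HIVE_IP=<hostname or IP of the hive>
  #   LS_SSL_VERIFICATION=none   # assumption: only if the hive uses a self-signed certificate
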
  ## Map Redis Service
  map_redis:
    container_name: map_redis
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    networks:
      - nginx_local
    stop_signal: SIGKILL
    tty: true
    image: ${TPOT_REPO}/redis:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true

  ## Map Web Service
  map_web:
    container_name: map_web
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    networks:
      - nginx_local
    environment:
      - MAP_COMMAND=AttackMapServer.py
    stop_signal: SIGKILL
    tty: true
    ports:
      - "127.0.0.1:64299:64299"
    image: ${TPOT_REPO}/map:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}

  ## Map Data Service
  map_data:
    container_name: map_data
    restart: always
    depends_on:
      elasticsearch:
        condition: service_healthy
    networks:
      - nginx_local
    environment:
      - MAP_COMMAND=DataServer_v2.py
      - TPOT_ATTACKMAP_TEXT=${TPOT_ATTACKMAP_TEXT}
      - TZ=${TPOT_ATTACKMAP_TEXT_TIMEZONE}
    stop_signal: SIGKILL
    tty: true
    image: ${TPOT_REPO}/map:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
  #### /ELK

  # Ewsposter service
  ewsposter:
    container_name: ewsposter
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    networks:
      - ewsposter_local
    environment:
      - EWS_HPFEEDS_ENABLE=false
      - EWS_HPFEEDS_HOST=host
      - EWS_HPFEEDS_PORT=port
      - EWS_HPFEEDS_CHANNELS=channels
      - EWS_HPFEEDS_IDENT=user
      - EWS_HPFEEDS_SECRET=secret
      - EWS_HPFEEDS_TLSCERT=false
      - EWS_HPFEEDS_FORMAT=json
    image: ${TPOT_REPO}/ewsposter:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    volumes:
      - ${TPOT_DATA_PATH}:/data
      - ${TPOT_DATA_PATH}/ews/conf/ews.ip:/opt/ewsposter/ews.ip

  # Nginx service
  nginx:
    container_name: nginx
    restart: always
    environment:
      - TPOT_OSTYPE=${TPOT_OSTYPE}
    depends_on:
      tpotinit:
        condition: service_healthy
    tmpfs:
      - /var/tmp/nginx/client_body
      - /var/tmp/nginx/proxy
      - /var/tmp/nginx/fastcgi
      - /var/tmp/nginx/uwsgi
      - /var/tmp/nginx/scgi
      - /run
      - /var/lib/nginx/tmp:uid=100,gid=82
    networks:
      - nginx_local
    ports:
      - "64297:64297"
    image: ${TPOT_REPO}/nginx:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/nginx/cert/:/etc/nginx/cert/:ro
      - ${TPOT_DATA_PATH}/nginx/conf/nginxpasswd:/etc/nginx/nginxpasswd:ro
      - ${TPOT_DATA_PATH}/nginx/log/:/var/log/nginx/

  # Spiderfoot service
  spiderfoot:
    container_name: spiderfoot
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    networks:
      - nginx_local
    ports:
      - "127.0.0.1:64303:8080"
    image: ${TPOT_REPO}/spiderfoot:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    volumes:
      - ${TPOT_DATA_PATH}/spiderfoot:/home/spiderfoot/.spiderfoot
550
compose/mini.yml
@ -1,550 +0,0 @@
# T-Pot: MINI
|
||||
networks:
|
||||
adbhoney_local:
|
||||
ciscoasa_local:
|
||||
conpot_local_IEC104:
|
||||
conpot_local_guardian_ast:
|
||||
conpot_local_ipmi:
|
||||
conpot_local_kamstrup_382:
|
||||
dicompot_local:
|
||||
honeypots_local:
|
||||
medpot_local:
|
||||
nginx_local:
|
||||
ewsposter_local:
|
||||
|
||||
services:
|
||||
|
||||
#########################################
|
||||
#### DEV
|
||||
#########################################
|
||||
#### T-Pot Init - Never delete this!
|
||||
#########################################
|
||||
|
||||
# T-Pot Init Service
|
||||
tpotinit:
|
||||
container_name: tpotinit
|
||||
env_file:
|
||||
- .env
|
||||
restart: always
|
||||
stop_grace_period: 60s
|
||||
tmpfs:
|
||||
- /tmp/etc:uid=2000,gid=2000
|
||||
- /tmp/:uid=2000,gid=2000
|
||||
network_mode: "host"
|
||||
cap_add:
|
||||
- NET_ADMIN
|
||||
image: ${TPOT_REPO}/tpotinit:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
volumes:
|
||||
- ${TPOT_DOCKER_COMPOSE}:/tmp/tpot/docker-compose.yml:ro
|
||||
- ${TPOT_DATA_PATH}/blackhole:/etc/blackhole
|
||||
- ${TPOT_DATA_PATH}:/data
|
||||
- /var/run/docker.sock:/var/run/docker.sock:ro
|
||||
|
||||
|
||||
##################
|
||||
#### Honeypots
|
||||
##################
|
||||
|
||||
# Adbhoney service
|
||||
adbhoney:
|
||||
container_name: adbhoney
|
||||
restart: always
|
||||
depends_on:
|
||||
tpotinit:
|
||||
condition: service_healthy
|
||||
networks:
|
||||
- adbhoney_local
|
||||
ports:
|
||||
- "5555:5555"
|
||||
image: ${TPOT_REPO}/adbhoney:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
read_only: true
|
||||
volumes:
|
||||
- ${TPOT_DATA_PATH}/adbhoney/log:/opt/adbhoney/log
|
||||
- ${TPOT_DATA_PATH}/adbhoney/downloads:/opt/adbhoney/dl
|
||||
|
||||
# Ciscoasa service
|
||||
ciscoasa:
|
||||
container_name: ciscoasa
|
||||
restart: always
|
||||
depends_on:
|
||||
tpotinit:
|
||||
condition: service_healthy
|
||||
tmpfs:
|
||||
- /tmp/ciscoasa:uid=2000,gid=2000
|
||||
networks:
|
||||
- ciscoasa_local
|
||||
ports:
|
||||
- "5000:5000/udp"
|
||||
- "8443:8443"
|
||||
image: ${TPOT_REPO}/ciscoasa:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
read_only: true
|
||||
volumes:
|
||||
- ${TPOT_DATA_PATH}/ciscoasa/log:/var/log/ciscoasa
|
||||
|
||||
# Conpot IEC104 service
|
||||
conpot_IEC104:
|
||||
container_name: conpot_iec104
|
||||
restart: always
|
||||
depends_on:
|
||||
tpotinit:
|
||||
condition: service_healthy
|
||||
environment:
|
||||
- CONPOT_CONFIG=/etc/conpot/conpot.cfg
|
||||
- CONPOT_JSON_LOG=/var/log/conpot/conpot_IEC104.json
|
||||
- CONPOT_LOG=/var/log/conpot/conpot_IEC104.log
|
||||
- CONPOT_TEMPLATE=IEC104
|
||||
- CONPOT_TMP=/tmp/conpot
|
||||
tmpfs:
|
||||
- /tmp/conpot:uid=2000,gid=2000
|
||||
networks:
|
||||
- conpot_local_IEC104
|
||||
ports:
|
||||
- "161:161/udp"
|
||||
- "2404:2404"
|
||||
image: ${TPOT_REPO}/conpot:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
read_only: true
|
||||
volumes:
|
||||
- ${TPOT_DATA_PATH}/conpot/log:/var/log/conpot
|
||||
|
||||
# Conpot guardian_ast service
|
||||
conpot_guardian_ast:
|
||||
container_name: conpot_guardian_ast
|
||||
restart: always
|
||||
depends_on:
|
||||
tpotinit:
|
||||
condition: service_healthy
|
||||
environment:
|
||||
- CONPOT_CONFIG=/etc/conpot/conpot.cfg
|
||||
- CONPOT_JSON_LOG=/var/log/conpot/conpot_guardian_ast.json
|
||||
- CONPOT_LOG=/var/log/conpot/conpot_guardian_ast.log
|
||||
- CONPOT_TEMPLATE=guardian_ast
|
||||
- CONPOT_TMP=/tmp/conpot
|
||||
tmpfs:
|
||||
- /tmp/conpot:uid=2000,gid=2000
|
||||
networks:
|
||||
- conpot_local_guardian_ast
|
||||
ports:
|
||||
- "10001:10001"
|
||||
image: ${TPOT_REPO}/conpot:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
read_only: true
|
||||
volumes:
|
||||
- ${TPOT_DATA_PATH}/conpot/log:/var/log/conpot
|
||||
|
||||
# Conpot ipmi
|
||||
conpot_ipmi:
|
||||
container_name: conpot_ipmi
|
||||
restart: always
|
||||
depends_on:
|
||||
tpotinit:
|
||||
condition: service_healthy
|
||||
environment:
|
||||
- CONPOT_CONFIG=/etc/conpot/conpot.cfg
|
||||
- CONPOT_JSON_LOG=/var/log/conpot/conpot_ipmi.json
|
||||
- CONPOT_LOG=/var/log/conpot/conpot_ipmi.log
|
||||
- CONPOT_TEMPLATE=ipmi
|
||||
- CONPOT_TMP=/tmp/conpot
|
||||
tmpfs:
|
||||
- /tmp/conpot:uid=2000,gid=2000
|
||||
networks:
|
||||
- conpot_local_ipmi
|
||||
ports:
|
||||
- "623:623/udp"
|
||||
image: ${TPOT_REPO}/conpot:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
read_only: true
|
||||
volumes:
|
||||
- ${TPOT_DATA_PATH}/conpot/log:/var/log/conpot
|
||||
|
||||
# Conpot kamstrup_382
|
||||
conpot_kamstrup_382:
|
||||
container_name: conpot_kamstrup_382
|
||||
restart: always
|
||||
depends_on:
|
||||
tpotinit:
|
||||
condition: service_healthy
|
||||
environment:
|
||||
- CONPOT_CONFIG=/etc/conpot/conpot.cfg
|
||||
- CONPOT_JSON_LOG=/var/log/conpot/conpot_kamstrup_382.json
|
||||
- CONPOT_LOG=/var/log/conpot/conpot_kamstrup_382.log
|
||||
- CONPOT_TEMPLATE=kamstrup_382
|
||||
- CONPOT_TMP=/tmp/conpot
|
||||
tmpfs:
|
||||
- /tmp/conpot:uid=2000,gid=2000
|
||||
networks:
|
||||
- conpot_local_kamstrup_382
|
||||
ports:
|
||||
- "1025:1025"
|
||||
- "50100:50100"
|
||||
image: ${TPOT_REPO}/conpot:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
read_only: true
|
||||
volumes:
|
||||
- ${TPOT_DATA_PATH}/conpot/log:/var/log/conpot
|
||||
|
||||
# Dicompot service
|
||||
# Get the Horos Client for testing: https://horosproject.org/
|
||||
# Get Dicom images (CC BY 3.0): https://www.cancerimagingarchive.net/collections/
|
||||
# Put images (which must be in Dicom DCM format or it will not work!) into /data/dicompot/images
|
||||
dicompot:
|
||||
container_name: dicompot
|
||||
restart: always
|
||||
depends_on:
|
||||
tpotinit:
|
||||
condition: service_healthy
|
||||
networks:
|
||||
- dicompot_local
|
||||
ports:
|
||||
- "104:11112"
|
||||
- "11112:11112"
|
||||
image: ${TPOT_REPO}/dicompot:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
read_only: true
|
||||
volumes:
|
||||
- ${TPOT_DATA_PATH}/dicompot/log:/var/log/dicompot
|
||||
# - ${TPOT_DATA_PATH}/dicompot/images:/opt/dicompot/images
|
||||
|
||||
# Honeypots service
|
||||
honeypots:
|
||||
container_name: honeypots
|
||||
stdin_open: true
|
||||
tty: true
|
||||
restart: always
|
||||
depends_on:
|
||||
tpotinit:
|
||||
condition: service_healthy
|
||||
tmpfs:
|
||||
- /tmp:uid=2000,gid=2000
|
||||
networks:
|
||||
- honeypots_local
|
||||
ports:
|
||||
- "21:21"
|
||||
- "22:22"
|
||||
- "23:23"
|
||||
- "25:25"
|
||||
- "53:53"
|
||||
- "67:67/udp"
|
||||
- "80:80"
|
||||
- "110:110"
|
||||
- "123:123"
|
||||
- "143:143"
|
||||
- "161:161"
|
||||
- "389:389"
|
||||
- "443:443"
|
||||
- "445:445"
|
||||
- "631:631"
|
||||
- "1080:1080"
|
||||
- "1433:1433"
|
||||
- "1521:1521"
|
||||
- "3306:3306"
|
||||
- "3389:3389"
|
||||
- "5060:5060/tcp"
|
||||
- "5060:5060/udp"
|
||||
- "5432:5432"
|
||||
- "5900:5900"
|
||||
- "6379:6379"
|
||||
- "6667:6667"
|
||||
- "8080:8080"
|
||||
- "9100:9100"
|
||||
- "9200:9200"
|
||||
- "11211:11211"
|
||||
image: ${TPOT_REPO}/honeypots:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
read_only: true
|
||||
volumes:
|
||||
- ${TPOT_DATA_PATH}/honeypots/log:/var/log/honeypots
|
||||
|
||||
# Honeytrap service
|
||||
honeytrap:
|
||||
container_name: honeytrap
|
||||
restart: always
|
||||
depends_on:
|
||||
tpotinit:
|
||||
condition: service_healthy
|
||||
tmpfs:
|
||||
- /tmp/honeytrap:uid=2000,gid=2000
|
||||
network_mode: "host"
|
||||
cap_add:
|
||||
- NET_ADMIN
|
||||
image: ${TPOT_REPO}/honeytrap:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
read_only: true
|
||||
volumes:
|
||||
- ${TPOT_DATA_PATH}/honeytrap/attacks:/opt/honeytrap/var/attacks
|
||||
- ${TPOT_DATA_PATH}/honeytrap/downloads:/opt/honeytrap/var/downloads
|
||||
- ${TPOT_DATA_PATH}/honeytrap/log:/opt/honeytrap/var/log
|
||||
|
||||
# Medpot service
|
||||
medpot:
|
||||
container_name: medpot
|
||||
restart: always
|
||||
depends_on:
|
||||
tpotinit:
|
||||
condition: service_healthy
|
||||
networks:
|
||||
- medpot_local
|
||||
ports:
|
||||
- "2575:2575"
|
||||
image: ${TPOT_REPO}/medpot:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
read_only: true
|
||||
volumes:
|
||||
- ${TPOT_DATA_PATH}/medpot/log/:/var/log/medpot
|
||||
|
||||
|
||||
##################
|
||||
#### NSM
|
||||
##################
|
||||
|
||||
# Fatt service
|
||||
fatt:
|
||||
container_name: fatt
|
||||
restart: always
|
||||
depends_on:
|
||||
tpotinit:
|
||||
condition: service_healthy
|
||||
network_mode: "host"
|
||||
cap_add:
|
||||
- NET_ADMIN
|
||||
- SYS_NICE
|
||||
- NET_RAW
|
||||
image: ${TPOT_REPO}/fatt:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
volumes:
|
||||
- ${TPOT_DATA_PATH}/fatt/log:/opt/fatt/log
|
||||
|
||||
# P0f service
|
||||
p0f:
|
||||
container_name: p0f
|
||||
restart: always
|
||||
depends_on:
|
||||
tpotinit:
|
||||
condition: service_healthy
|
||||
network_mode: "host"
|
||||
image: ${TPOT_REPO}/p0f:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
read_only: true
|
||||
volumes:
|
||||
- ${TPOT_DATA_PATH}/p0f/log:/var/log/p0f
|
||||
|
||||
# Suricata service
|
||||
suricata:
|
||||
container_name: suricata
|
||||
restart: always
|
||||
depends_on:
|
||||
tpotinit:
|
||||
condition: service_healthy
|
||||
environment:
|
||||
- OINKCODE=${OINKCODE:-OPEN} # Default to OPEN if unset or NULL (value provided by T-Pot .env)
|
||||
# Loading external Rules from URL
|
||||
# - FROMURL="https://username:password@yoururl.com|https://username:password@otherurl.com"
|
||||
network_mode: "host"
|
||||
cap_add:
|
||||
- NET_ADMIN
|
||||
- SYS_NICE
|
||||
- NET_RAW
|
||||
image: ${TPOT_REPO}/suricata:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
volumes:
|
||||
- ${TPOT_DATA_PATH}/suricata/log:/var/log/suricata
|
||||
|
||||
|
||||
##################
|
||||
#### Tools
|
||||
##################
|
||||
|
||||
#### ELK
|
||||
## Elasticsearch service
|
||||
elasticsearch:
|
||||
container_name: elasticsearch
|
||||
restart: always
|
||||
depends_on:
|
||||
tpotinit:
|
||||
condition: service_healthy
|
||||
networks:
|
||||
- nginx_local
|
||||
environment:
|
||||
- bootstrap.memory_lock=true
|
||||
- ES_JAVA_OPTS=-Xms2048m -Xmx2048m
|
||||
- ES_TMPDIR=/tmp
|
||||
cap_add:
|
||||
- IPC_LOCK
|
||||
ulimits:
|
||||
memlock:
|
||||
soft: -1
|
||||
hard: -1
|
||||
nofile:
|
||||
soft: 65536
|
||||
hard: 65536
|
||||
mem_limit: 4g
|
||||
ports:
|
||||
- "127.0.0.1:64298:9200"
|
||||
image: ${TPOT_REPO}/elasticsearch:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
volumes:
|
||||
- ${TPOT_DATA_PATH}:/data
|
||||
|
||||
## Kibana service
|
||||
kibana:
|
||||
container_name: kibana
|
||||
restart: always
|
||||
depends_on:
|
||||
elasticsearch:
|
||||
condition: service_healthy
|
||||
networks:
|
||||
- nginx_local
|
||||
mem_limit: 1g
|
||||
ports:
|
||||
- "127.0.0.1:64296:5601"
|
||||
image: ${TPOT_REPO}/kibana:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
|
||||
## Logstash service
|
||||
logstash:
|
||||
container_name: logstash
|
||||
restart: always
|
||||
depends_on:
|
||||
elasticsearch:
|
||||
condition: service_healthy
|
||||
networks:
|
||||
- nginx_local
|
||||
environment:
|
||||
- LS_JAVA_OPTS=-Xms1024m -Xmx1024m
|
||||
- TPOT_TYPE=${TPOT_TYPE:-HIVE}
|
||||
- TPOT_HIVE_USER=${TPOT_HIVE_USER}
|
||||
- TPOT_HIVE_IP=${TPOT_HIVE_IP}
|
||||
- LS_SSL_VERIFICATION=${LS_SSL_VERIFICATION:-full}
|
||||
ports:
|
||||
- "127.0.0.1:64305:64305"
|
||||
mem_limit: 2g
|
||||
image: ${TPOT_REPO}/logstash:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
volumes:
|
||||
- ${TPOT_DATA_PATH}:/data
|
||||
|
||||
## Map Redis Service
|
||||
map_redis:
|
||||
container_name: map_redis
|
||||
restart: always
|
||||
depends_on:
|
||||
tpotinit:
|
||||
condition: service_healthy
|
||||
networks:
|
||||
- nginx_local
|
||||
stop_signal: SIGKILL
|
||||
tty: true
|
||||
image: ${TPOT_REPO}/redis:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
read_only: true
|
||||
|
||||
## Map Web Service
|
||||
map_web:
|
||||
container_name: map_web
|
||||
restart: always
|
||||
depends_on:
|
||||
tpotinit:
|
||||
condition: service_healthy
|
||||
networks:
|
||||
- nginx_local
|
||||
environment:
|
||||
- MAP_COMMAND=AttackMapServer.py
|
||||
stop_signal: SIGKILL
|
||||
tty: true
|
||||
ports:
|
||||
- "127.0.0.1:64299:64299"
|
||||
image: ${TPOT_REPO}/map:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
|
||||
## Map Data Service
|
||||
map_data:
|
||||
container_name: map_data
|
||||
restart: always
|
||||
depends_on:
|
||||
elasticsearch:
|
||||
condition: service_healthy
|
||||
networks:
|
||||
- nginx_local
|
||||
environment:
|
||||
- MAP_COMMAND=DataServer_v2.py
|
||||
- TPOT_ATTACKMAP_TEXT=${TPOT_ATTACKMAP_TEXT}
|
||||
- TZ=${TPOT_ATTACKMAP_TEXT_TIMEZONE}
|
||||
stop_signal: SIGKILL
|
||||
tty: true
|
||||
image: ${TPOT_REPO}/map:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
#### /ELK
|
||||
|
||||
# Ewsposter service
|
||||
ewsposter:
|
||||
container_name: ewsposter
|
||||
restart: always
|
||||
depends_on:
|
||||
tpotinit:
|
||||
condition: service_healthy
|
||||
networks:
|
||||
- ewsposter_local
|
||||
environment:
|
||||
- EWS_HPFEEDS_ENABLE=false
|
||||
- EWS_HPFEEDS_HOST=host
|
||||
- EWS_HPFEEDS_PORT=port
|
||||
- EWS_HPFEEDS_CHANNELS=channels
|
||||
- EWS_HPFEEDS_IDENT=user
|
||||
- EWS_HPFEEDS_SECRET=secret
|
||||
- EWS_HPFEEDS_TLSCERT=false
|
||||
- EWS_HPFEEDS_FORMAT=json
|
||||
image: ${TPOT_REPO}/ewsposter:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
volumes:
|
||||
- ${TPOT_DATA_PATH}:/data
|
||||
- ${TPOT_DATA_PATH}/ews/conf/ews.ip:/opt/ewsposter/ews.ip
|
||||
|
||||
# Nginx service
|
||||
nginx:
|
||||
container_name: nginx
|
||||
restart: always
|
||||
environment:
|
||||
- TPOT_OSTYPE=${TPOT_OSTYPE}
|
||||
depends_on:
|
||||
tpotinit:
|
||||
condition: service_healthy
|
||||
tmpfs:
|
||||
- /var/tmp/nginx/client_body
|
||||
- /var/tmp/nginx/proxy
|
||||
- /var/tmp/nginx/fastcgi
|
||||
- /var/tmp/nginx/uwsgi
|
||||
- /var/tmp/nginx/scgi
|
||||
- /run
|
||||
- /var/lib/nginx/tmp:uid=100,gid=82
|
||||
networks:
|
||||
- nginx_local
|
||||
ports:
|
||||
- "64297:64297"
|
||||
- "64294:64294"
|
||||
image: ${TPOT_REPO}/nginx:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
read_only: true
|
||||
volumes:
|
||||
- ${TPOT_DATA_PATH}/nginx/cert/:/etc/nginx/cert/:ro
|
||||
- ${TPOT_DATA_PATH}/nginx/conf/nginxpasswd:/etc/nginx/nginxpasswd:ro
|
||||
- ${TPOT_DATA_PATH}/nginx/conf/lswebpasswd:/etc/nginx/lswebpasswd:ro
|
||||
- ${TPOT_DATA_PATH}/nginx/log/:/var/log/nginx/
|
||||
|
||||
# Spiderfoot service
|
||||
spiderfoot:
|
||||
container_name: spiderfoot
|
||||
restart: always
|
||||
depends_on:
|
||||
tpotinit:
|
||||
condition: service_healthy
|
||||
networks:
|
||||
- nginx_local
|
||||
ports:
|
||||
- "127.0.0.1:64303:8080"
|
||||
image: ${TPOT_REPO}/spiderfoot:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
volumes:
|
||||
- ${TPOT_DATA_PATH}/spiderfoot:/home/spiderfoot/.spiderfoot
|
||||
|
@ -1,664 +0,0 @@
# T-Pot: MOBILE
# Note: This Docker Compose file limits the number of tools, services and honeypots so that T-Pot can
# run on a Raspberry Pi 4 (8 GB of RAM).
# The standard Docker Compose file should work mostly fine (depending on traffic) as long as you do not
# enable a desktop environment such as LXDE and meet the minimum requirement of 8 GB RAM.
networks:
|
||||
ciscoasa_local:
|
||||
conpot_local_IEC104:
|
||||
conpot_local_ipmi:
|
||||
conpot_local_kamstrup_382:
|
||||
cowrie_local:
|
||||
dicompot_local:
|
||||
dionaea_local:
|
||||
elasticpot_local:
|
||||
h0neytr4p_local:
|
||||
heralding_local:
|
||||
honeyaml_local:
|
||||
ipphoney_local:
|
||||
log4pot_local:
|
||||
mailoney_local:
|
||||
medpot_local:
|
||||
miniprint_local:
|
||||
redishoneypot_local:
|
||||
sentrypeer_local:
|
||||
tanner_local:
|
||||
wordpot_local:
|
||||
ewsposter_local:
|
||||
|
||||
services:
|
||||
|
||||
#########################################
|
||||
#### DEV
|
||||
#########################################
|
||||
#### T-Pot Init - Never delete this!
|
||||
#########################################
|
||||
|
||||
# T-Pot Init Service
|
||||
tpotinit:
|
||||
container_name: tpotinit
|
||||
env_file:
|
||||
- .env
|
||||
restart: always
|
||||
stop_grace_period: 60s
|
||||
tmpfs:
|
||||
- /tmp/etc:uid=2000,gid=2000
|
||||
- /tmp/:uid=2000,gid=2000
|
||||
network_mode: "host"
|
||||
cap_add:
|
||||
- NET_ADMIN
|
||||
image: ${TPOT_REPO}/tpotinit:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
volumes:
|
||||
- ${TPOT_DOCKER_COMPOSE}:/tmp/tpot/docker-compose.yml:ro
|
||||
- ${TPOT_DATA_PATH}/blackhole:/etc/blackhole
|
||||
- ${TPOT_DATA_PATH}:/data
|
||||
- /var/run/docker.sock:/var/run/docker.sock:ro
|
||||
|
||||
##################
|
||||
#### Honeypots
|
||||
##################
|
||||
|
||||
# Ciscoasa service
|
||||
ciscoasa:
|
||||
container_name: ciscoasa
|
||||
restart: always
|
||||
depends_on:
|
||||
logstash:
|
||||
condition: service_healthy
|
||||
tmpfs:
|
||||
- /tmp/ciscoasa:uid=2000,gid=2000
|
||||
networks:
|
||||
- ciscoasa_local
|
||||
ports:
|
||||
- "5000:5000/udp"
|
||||
- "8443:8443"
|
||||
image: ${TPOT_REPO}/ciscoasa:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
read_only: true
|
||||
volumes:
|
||||
- ${TPOT_DATA_PATH}/ciscoasa/log:/var/log/ciscoasa
|
||||
|
||||
# Conpot IEC104 service
|
||||
conpot_IEC104:
|
||||
container_name: conpot_iec104
|
||||
restart: always
|
||||
depends_on:
|
||||
logstash:
|
||||
condition: service_healthy
|
||||
environment:
|
||||
- CONPOT_CONFIG=/etc/conpot/conpot.cfg
|
||||
- CONPOT_JSON_LOG=/var/log/conpot/conpot_IEC104.json
|
||||
- CONPOT_LOG=/var/log/conpot/conpot_IEC104.log
|
||||
- CONPOT_TEMPLATE=IEC104
|
||||
- CONPOT_TMP=/tmp/conpot
|
||||
tmpfs:
|
||||
- /tmp/conpot:uid=2000,gid=2000
|
||||
networks:
|
||||
- conpot_local_IEC104
|
||||
ports:
|
||||
- "161:161/udp"
|
||||
- "2404:2404"
|
||||
image: ${TPOT_REPO}/conpot:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
read_only: true
|
||||
volumes:
|
||||
- ${TPOT_DATA_PATH}/conpot/log:/var/log/conpot
|
||||
|
||||
# Conpot ipmi
|
||||
conpot_ipmi:
|
||||
container_name: conpot_ipmi
|
||||
restart: always
|
||||
depends_on:
|
||||
logstash:
|
||||
condition: service_healthy
|
||||
environment:
|
||||
- CONPOT_CONFIG=/etc/conpot/conpot.cfg
|
||||
- CONPOT_JSON_LOG=/var/log/conpot/conpot_ipmi.json
|
||||
- CONPOT_LOG=/var/log/conpot/conpot_ipmi.log
|
||||
- CONPOT_TEMPLATE=ipmi
|
||||
- CONPOT_TMP=/tmp/conpot
|
||||
tmpfs:
|
||||
- /tmp/conpot:uid=2000,gid=2000
|
||||
networks:
|
||||
- conpot_local_ipmi
|
||||
ports:
|
||||
- "623:623/udp"
|
||||
image: ${TPOT_REPO}/conpot:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
read_only: true
|
||||
volumes:
|
||||
- ${TPOT_DATA_PATH}/conpot/log:/var/log/conpot
|
||||
|
||||
# Conpot kamstrup_382
|
||||
conpot_kamstrup_382:
|
||||
container_name: conpot_kamstrup_382
|
||||
restart: always
|
||||
depends_on:
|
||||
logstash:
|
||||
condition: service_healthy
|
||||
environment:
|
||||
- CONPOT_CONFIG=/etc/conpot/conpot.cfg
|
||||
- CONPOT_JSON_LOG=/var/log/conpot/conpot_kamstrup_382.json
|
||||
- CONPOT_LOG=/var/log/conpot/conpot_kamstrup_382.log
|
||||
- CONPOT_TEMPLATE=kamstrup_382
|
||||
- CONPOT_TMP=/tmp/conpot
|
||||
tmpfs:
|
||||
- /tmp/conpot:uid=2000,gid=2000
|
||||
networks:
|
||||
- conpot_local_kamstrup_382
|
||||
ports:
|
||||
- "1025:1025"
|
||||
- "50100:50100"
|
||||
image: ${TPOT_REPO}/conpot:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
read_only: true
|
||||
volumes:
|
||||
- ${TPOT_DATA_PATH}/conpot/log:/var/log/conpot
|
||||
|
||||
# Cowrie service
|
||||
cowrie:
|
||||
container_name: cowrie
|
||||
restart: always
|
||||
depends_on:
|
||||
logstash:
|
||||
condition: service_healthy
|
||||
tmpfs:
|
||||
- /tmp/cowrie:uid=2000,gid=2000
|
||||
- /tmp/cowrie/data:uid=2000,gid=2000
|
||||
networks:
|
||||
- cowrie_local
|
||||
ports:
|
||||
- "22:22"
|
||||
- "23:23"
|
||||
image: ${TPOT_REPO}/cowrie:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
read_only: true
|
||||
volumes:
|
||||
- ${TPOT_DATA_PATH}/cowrie/downloads:/home/cowrie/cowrie/dl
|
||||
- ${TPOT_DATA_PATH}/cowrie/keys:/home/cowrie/cowrie/etc
|
||||
- ${TPOT_DATA_PATH}/cowrie/log:/home/cowrie/cowrie/log
|
||||
- ${TPOT_DATA_PATH}/cowrie/log/tty:/home/cowrie/cowrie/log/tty
|
||||
|
||||
# Dicompot service
|
||||
# Get the Horos Client for testing: https://horosproject.org/
|
||||
# Get Dicom images (CC BY 3.0): https://www.cancerimagingarchive.net/collections/
|
||||
# Put images (which must be in Dicom DCM format or it will not work!) into /data/dicompot/images
|
||||
dicompot:
|
||||
container_name: dicompot
|
||||
restart: always
|
||||
depends_on:
|
||||
logstash:
|
||||
condition: service_healthy
|
||||
networks:
|
||||
- dicompot_local
|
||||
ports:
|
||||
- "104:11112"
|
||||
- "11112:11112"
|
||||
image: ${TPOT_REPO}/dicompot:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
read_only: true
|
||||
volumes:
|
||||
- ${TPOT_DATA_PATH}/dicompot/log:/var/log/dicompot
|
||||
# - ${TPOT_DATA_PATH}/dicompot/images:/opt/dicompot/images
|
||||
|
||||
# Dionaea service
|
||||
dionaea:
|
||||
container_name: dionaea
|
||||
stdin_open: true
|
||||
tty: true
|
||||
restart: always
|
||||
depends_on:
|
||||
logstash:
|
||||
condition: service_healthy
|
||||
networks:
|
||||
- dionaea_local
|
||||
ports:
|
||||
- "20:20"
|
||||
- "21:21"
|
||||
- "42:42"
|
||||
- "69:69/udp"
|
||||
- "81:81"
|
||||
- "135:135"
|
||||
# - "443:443"
|
||||
- "445:445"
|
||||
- "1433:1433"
|
||||
- "1723:1723"
|
||||
- "1883:1883"
|
||||
- "3306:3306"
|
||||
# - "5060:5060"
|
||||
# - "5060:5060/udp"
|
||||
# - "5061:5061"
|
||||
- "27017:27017"
|
||||
image: ${TPOT_REPO}/dionaea:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
read_only: true
|
||||
volumes:
|
||||
- ${TPOT_DATA_PATH}/dionaea/roots/ftp:/opt/dionaea/var/dionaea/roots/ftp
|
||||
- ${TPOT_DATA_PATH}/dionaea/roots/tftp:/opt/dionaea/var/dionaea/roots/tftp
|
||||
- ${TPOT_DATA_PATH}/dionaea/roots/www:/opt/dionaea/var/dionaea/roots/www
|
||||
- ${TPOT_DATA_PATH}/dionaea/roots/upnp:/opt/dionaea/var/dionaea/roots/upnp
|
||||
- ${TPOT_DATA_PATH}/dionaea:/opt/dionaea/var/dionaea
|
||||
- ${TPOT_DATA_PATH}/dionaea/binaries:/opt/dionaea/var/dionaea/binaries
|
||||
- ${TPOT_DATA_PATH}/dionaea/log:/opt/dionaea/var/log
|
||||
- ${TPOT_DATA_PATH}/dionaea/rtp:/opt/dionaea/var/dionaea/rtp
|
||||
|
||||
# ElasticPot service
|
||||
elasticpot:
|
||||
container_name: elasticpot
|
||||
restart: always
|
||||
depends_on:
|
||||
logstash:
|
||||
condition: service_healthy
|
||||
networks:
|
||||
- elasticpot_local
|
||||
ports:
|
||||
- "9200:9200"
|
||||
image: ${TPOT_REPO}/elasticpot:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
read_only: true
|
||||
volumes:
|
||||
- ${TPOT_DATA_PATH}/elasticpot/log:/opt/elasticpot/log
|
||||
|
||||
# H0neytr4p service
|
||||
h0neytr4p:
|
||||
container_name: h0neytr4p
|
||||
restart: always
|
||||
depends_on:
|
||||
logstash:
|
||||
condition: service_healthy
|
||||
networks:
|
||||
- h0neytr4p_local
|
||||
ports:
|
||||
- "443:443"
|
||||
# - "80:80"
|
||||
image: ${TPOT_REPO}/h0neytr4p:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
read_only: true
|
||||
volumes:
|
||||
- ${TPOT_DATA_PATH}/h0neytr4p/log/:/opt/h0neytr4p/log/
|
||||
- ${TPOT_DATA_PATH}/h0neytr4p/payloads/:/data/h0neytr4p/payloads/
|
||||
|
||||
# Heralding service
|
||||
heralding:
|
||||
container_name: heralding
|
||||
restart: always
|
||||
depends_on:
|
||||
logstash:
|
||||
condition: service_healthy
|
||||
tmpfs:
|
||||
- /tmp/heralding:uid=2000,gid=2000
|
||||
networks:
|
||||
- heralding_local
|
||||
ports:
|
||||
# - "21:21"
|
||||
# - "22:22"
|
||||
# - "23:23"
|
||||
# - "25:25"
|
||||
# - "80:80"
|
||||
- "110:110"
|
||||
- "143:143"
|
||||
# - "443:443"
|
||||
- "465:465"
|
||||
- "993:993"
|
||||
- "995:995"
|
||||
# - "3306:3306"
|
||||
# - "3389:3389"
|
||||
- "1080:1080"
|
||||
- "5432:5432"
|
||||
- "5900:5900"
|
||||
image: ${TPOT_REPO}/heralding:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
read_only: true
|
||||
volumes:
|
||||
- ${TPOT_DATA_PATH}/heralding/log:/var/log/heralding
|
||||
|
||||
# Honeyaml service
|
||||
honeyaml:
|
||||
container_name: honeyaml
|
||||
restart: always
|
||||
depends_on:
|
||||
logstash:
|
||||
condition: service_healthy
|
||||
networks:
|
||||
- honeyaml_local
|
||||
ports:
|
||||
- "3000:8080"
|
||||
image: ${TPOT_REPO}/honeyaml:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
read_only: true
|
||||
volumes:
|
||||
- ${TPOT_DATA_PATH}/honeyaml/log:/opt/honeyaml/log/
|
||||
|
||||
# Honeytrap service
|
||||
honeytrap:
|
||||
container_name: honeytrap
|
||||
restart: always
|
||||
depends_on:
|
||||
logstash:
|
||||
condition: service_healthy
|
||||
tmpfs:
|
||||
- /tmp/honeytrap:uid=2000,gid=2000
|
||||
network_mode: "host"
|
||||
cap_add:
|
||||
- NET_ADMIN
|
||||
image: ${TPOT_REPO}/honeytrap:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
read_only: true
|
||||
volumes:
|
||||
- ${TPOT_DATA_PATH}/honeytrap/attacks:/opt/honeytrap/var/attacks
|
||||
- ${TPOT_DATA_PATH}/honeytrap/downloads:/opt/honeytrap/var/downloads
|
||||
- ${TPOT_DATA_PATH}/honeytrap/log:/opt/honeytrap/var/log
|
||||
|
||||
# Ipphoney service
|
||||
ipphoney:
|
||||
container_name: ipphoney
|
||||
restart: always
|
||||
depends_on:
|
||||
logstash:
|
||||
condition: service_healthy
|
||||
networks:
|
||||
- ipphoney_local
|
||||
ports:
|
||||
- "631:631"
|
||||
image: ${TPOT_REPO}/ipphoney:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
read_only: true
|
||||
volumes:
|
||||
- ${TPOT_DATA_PATH}/ipphoney/log:/opt/ipphoney/log
|
||||
|
||||
# Log4pot service
|
||||
log4pot:
|
||||
container_name: log4pot
|
||||
restart: always
|
||||
depends_on:
|
||||
logstash:
|
||||
condition: service_healthy
|
||||
tmpfs:
|
||||
- /tmp:uid=2000,gid=2000
|
||||
networks:
|
||||
- log4pot_local
|
||||
ports:
|
||||
# - "80:8080"
|
||||
# - "443:8080"
|
||||
# - "8080:8080"
|
||||
# - "9200:8080"
|
||||
- "25565:8080"
|
||||
image: ${TPOT_REPO}/log4pot:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
read_only: true
|
||||
volumes:
|
||||
- ${TPOT_DATA_PATH}/log4pot/log:/var/log/log4pot/log
|
||||
- ${TPOT_DATA_PATH}/log4pot/payloads:/var/log/log4pot/payloads
|
||||
|
||||
# Mailoney service
|
||||
mailoney:
|
||||
container_name: mailoney
|
||||
stdin_open: true
|
||||
tty: true
|
||||
restart: always
|
||||
depends_on:
|
||||
logstash:
|
||||
condition: service_healthy
|
||||
networks:
|
||||
- mailoney_local
|
||||
ports:
|
||||
- "25:25"
|
||||
- "587:25"
|
||||
image: ${TPOT_REPO}/mailoney:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
read_only: true
|
||||
volumes:
|
||||
- ${TPOT_DATA_PATH}/mailoney/log:/opt/mailoney/logs
|
||||
|
||||
# Medpot service
|
||||
medpot:
|
||||
container_name: medpot
|
||||
restart: always
|
||||
depends_on:
|
||||
logstash:
|
||||
condition: service_healthy
|
||||
networks:
|
||||
- medpot_local
|
||||
ports:
|
||||
- "2575:2575"
|
||||
image: ${TPOT_REPO}/medpot:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
read_only: true
|
||||
volumes:
|
||||
- ${TPOT_DATA_PATH}/medpot/log/:/var/log/medpot
|
||||
|
||||
# Miniprint service
|
||||
miniprint:
|
||||
container_name: miniprint
|
||||
restart: always
|
||||
depends_on:
|
||||
logstash:
|
||||
condition: service_healthy
|
||||
networks:
|
||||
- miniprint_local
|
||||
ports:
|
||||
- "9100:9100"
|
||||
image: ${TPOT_REPO}/miniprint:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
read_only: true
|
||||
volumes:
|
||||
- ${TPOT_DATA_PATH}/miniprint/log/:/opt/miniprint/log/
|
||||
- ${TPOT_DATA_PATH}/miniprint/uploads/:/opt/miniprint/uploads/
|
||||
|
||||
# Redishoneypot service
|
||||
redishoneypot:
|
||||
container_name: redishoneypot
|
||||
restart: always
|
||||
depends_on:
|
||||
logstash:
|
||||
condition: service_healthy
|
||||
networks:
|
||||
- redishoneypot_local
|
||||
ports:
|
||||
- "6379:6379"
|
||||
image: ${TPOT_REPO}/redishoneypot:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
read_only: true
|
||||
volumes:
|
||||
- ${TPOT_DATA_PATH}/redishoneypot/log:/var/log/redishoneypot
|
||||
|
||||
# SentryPeer service
|
||||
sentrypeer:
|
||||
container_name: sentrypeer
|
||||
restart: always
|
||||
depends_on:
|
||||
logstash:
|
||||
condition: service_healthy
|
||||
# environment:
|
||||
# - SENTRYPEER_PEER_TO_PEER=1
|
||||
networks:
|
||||
- sentrypeer_local
|
||||
ports:
|
||||
# - "4222:4222/udp"
|
||||
- "5060:5060/tcp"
|
||||
- "5060:5060/udp"
|
||||
# - "127.0.0.1:8082:8082"
|
||||
image: ${TPOT_REPO}/sentrypeer:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
read_only: true
|
||||
volumes:
|
||||
- ${TPOT_DATA_PATH}/sentrypeer/log:/var/log/sentrypeer
|
||||
|
||||
#### Snare / Tanner
|
||||
## Tanner Redis Service
|
||||
tanner_redis:
|
||||
container_name: tanner_redis
|
||||
restart: always
|
||||
depends_on:
|
||||
logstash:
|
||||
condition: service_healthy
|
||||
tty: true
|
||||
networks:
|
||||
- tanner_local
|
||||
image: ${TPOT_REPO}/redis:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
read_only: true
|
||||
|
||||
## PHP Sandbox service
|
||||
tanner_phpox:
|
||||
container_name: tanner_phpox
|
||||
restart: always
|
||||
depends_on:
|
||||
logstash:
|
||||
condition: service_healthy
|
||||
tty: true
|
||||
networks:
|
||||
- tanner_local
|
||||
image: ${TPOT_REPO}/phpox:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
read_only: true
|
||||
|
||||
## Tanner API Service
|
||||
tanner_api:
|
||||
container_name: tanner_api
|
||||
restart: always
|
||||
depends_on:
|
||||
- tanner_redis
|
||||
tmpfs:
|
||||
- /tmp/tanner:uid=2000,gid=2000
|
||||
tty: true
|
||||
networks:
|
||||
- tanner_local
|
||||
image: ${TPOT_REPO}/tanner:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
read_only: true
|
||||
volumes:
|
||||
- ${TPOT_DATA_PATH}/tanner/log:/var/log/tanner
|
||||
command: tannerapi
|
||||
|
||||
## Tanner Service
|
||||
tanner:
|
||||
container_name: tanner
|
||||
restart: always
|
||||
depends_on:
|
||||
- tanner_api
|
||||
- tanner_phpox
|
||||
tmpfs:
|
||||
- /tmp/tanner:uid=2000,gid=2000
|
||||
tty: true
|
||||
networks:
|
||||
- tanner_local
|
||||
image: ${TPOT_REPO}/tanner:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
command: tanner
|
||||
read_only: true
|
||||
volumes:
|
||||
- ${TPOT_DATA_PATH}/tanner/log:/var/log/tanner
|
||||
- ${TPOT_DATA_PATH}/tanner/files:/opt/tanner/files
|
||||
|
||||
## Snare Service
|
||||
snare:
|
||||
container_name: snare
|
||||
restart: always
|
||||
depends_on:
|
||||
- tanner
|
||||
tty: true
|
||||
networks:
|
||||
- tanner_local
|
||||
ports:
|
||||
- "80:80"
|
||||
image: ${TPOT_REPO}/snare:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
|
||||
# Wordpot service
|
||||
wordpot:
|
||||
container_name: wordpot
|
||||
restart: always
|
||||
depends_on:
|
||||
logstash:
|
||||
condition: service_healthy
|
||||
networks:
|
||||
- wordpot_local
|
||||
ports:
|
||||
- "8080:80"
|
||||
image: ${TPOT_REPO}/wordpot:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
read_only: true
|
||||
volumes:
|
||||
- ${TPOT_DATA_PATH}/wordpot/log:/opt/wordpot/logs/
|
||||
|
||||
|
||||
##################
|
||||
#### Tools
|
||||
##################
|
||||
|
||||
#### ELK
|
||||
## Elasticsearch service
|
||||
elasticsearch:
|
||||
container_name: elasticsearch
|
||||
restart: always
|
||||
depends_on:
|
||||
tpotinit:
|
||||
condition: service_healthy
|
||||
environment:
|
||||
- bootstrap.memory_lock=true
|
||||
- ES_JAVA_OPTS=-Xms2048m -Xmx2048m
|
||||
- ES_TMPDIR=/tmp
|
||||
cap_add:
|
||||
- IPC_LOCK
|
||||
ulimits:
|
||||
memlock:
|
||||
soft: -1
|
||||
hard: -1
|
||||
nofile:
|
||||
soft: 65536
|
||||
hard: 65536
|
||||
mem_limit: 4g
|
||||
ports:
|
||||
- "127.0.0.1:64298:9200"
|
||||
image: ${TPOT_REPO}/elasticsearch:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
volumes:
|
||||
- ${TPOT_DATA_PATH}:/data
|
||||
|
||||
## Logstash service
|
||||
logstash:
|
||||
container_name: logstash
|
||||
restart: always
|
||||
depends_on:
|
||||
elasticsearch:
|
||||
condition: service_healthy
|
||||
environment:
|
||||
- LS_JAVA_OPTS=-Xms1024m -Xmx1024m
|
||||
- TPOT_TYPE=${TPOT_TYPE:-HIVE}
|
||||
- TPOT_HIVE_USER=${TPOT_HIVE_USER}
|
||||
- TPOT_HIVE_IP=${TPOT_HIVE_IP}
|
||||
- LS_SSL_VERIFICATION=${LS_SSL_VERIFICATION:-full}
|
||||
ports:
|
||||
- "127.0.0.1:64305:64305"
|
||||
mem_limit: 2g
|
||||
image: ${TPOT_REPO}/logstash:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
volumes:
|
||||
- ${TPOT_DATA_PATH}:/data
|
||||
#### /ELK
|
||||
|
||||
# Ewsposter service
|
||||
ewsposter:
|
||||
container_name: ewsposter
|
||||
restart: always
|
||||
depends_on:
|
||||
logstash:
|
||||
condition: service_healthy
|
||||
networks:
|
||||
- ewsposter_local
|
||||
environment:
|
||||
- EWS_HPFEEDS_ENABLE=false
|
||||
- EWS_HPFEEDS_HOST=host
|
||||
- EWS_HPFEEDS_PORT=port
|
||||
- EWS_HPFEEDS_CHANNELS=channels
|
||||
- EWS_HPFEEDS_IDENT=user
|
||||
- EWS_HPFEEDS_SECRET=secret
|
||||
- EWS_HPFEEDS_TLSCERT=false
|
||||
- EWS_HPFEEDS_FORMAT=json
|
||||
image: ${TPOT_REPO}/ewsposter:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
volumes:
|
||||
- ${TPOT_DATA_PATH}:/data
|
||||
- ${TPOT_DATA_PATH}/ews/conf/ews.ip:/opt/ewsposter/ews.ip
|
||||
|
@ -1,711 +0,0 @@
# T-Pot: SENSOR
|
||||
networks:
|
||||
adbhoney_local:
|
||||
ciscoasa_local:
|
||||
conpot_local_IEC104:
|
||||
conpot_local_guardian_ast:
|
||||
conpot_local_ipmi:
|
||||
conpot_local_kamstrup_382:
|
||||
cowrie_local:
|
||||
dicompot_local:
|
||||
dionaea_local:
|
||||
elasticpot_local:
|
||||
h0neytr4p_local:
|
||||
heralding_local:
|
||||
honeyaml_local:
|
||||
ipphoney_local:
|
||||
mailoney_local:
|
||||
medpot_local:
|
||||
miniprint_local:
|
||||
redishoneypot_local:
|
||||
sentrypeer_local:
|
||||
tanner_local:
|
||||
wordpot_local:
|
||||
ewsposter_local:
|
||||
|
||||
services:
|
||||
|
||||
#########################################
|
||||
#### DEV
|
||||
#########################################
|
||||
#### T-Pot Init - Never delete this!
|
||||
#########################################
|
||||
|
||||
# T-Pot Init Service
|
||||
tpotinit:
|
||||
container_name: tpotinit
|
||||
env_file:
|
||||
- .env
|
||||
restart: always
|
||||
stop_grace_period: 60s
|
||||
tmpfs:
|
||||
- /tmp/etc:uid=2000,gid=2000
|
||||
- /tmp/:uid=2000,gid=2000
|
||||
network_mode: "host"
|
||||
cap_add:
|
||||
- NET_ADMIN
|
||||
image: ${TPOT_REPO}/tpotinit:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
volumes:
|
||||
- ${TPOT_DOCKER_COMPOSE}:/tmp/tpot/docker-compose.yml:ro
|
||||
- ${TPOT_DATA_PATH}/blackhole:/etc/blackhole
|
||||
- ${TPOT_DATA_PATH}:/data
|
||||
- /var/run/docker.sock:/var/run/docker.sock:ro
|
||||
|
||||
##################
|
||||
#### Honeypots
|
||||
##################
|
||||
|
||||
# Adbhoney service
|
||||
adbhoney:
|
||||
container_name: adbhoney
|
||||
restart: always
|
||||
depends_on:
|
||||
tpotinit:
|
||||
condition: service_healthy
|
||||
networks:
|
||||
- adbhoney_local
|
||||
ports:
|
||||
- "5555:5555"
|
||||
image: ${TPOT_REPO}/adbhoney:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
read_only: true
|
||||
volumes:
|
||||
- ${TPOT_DATA_PATH}/adbhoney/log:/opt/adbhoney/log
|
||||
- ${TPOT_DATA_PATH}/adbhoney/downloads:/opt/adbhoney/dl
|
||||
|
||||
# Ciscoasa service
|
||||
ciscoasa:
|
||||
container_name: ciscoasa
|
||||
restart: always
|
||||
depends_on:
|
||||
tpotinit:
|
||||
condition: service_healthy
|
||||
tmpfs:
|
||||
- /tmp/ciscoasa:uid=2000,gid=2000
|
||||
networks:
|
||||
- ciscoasa_local
|
||||
ports:
|
||||
- "5000:5000/udp"
|
||||
- "8443:8443"
|
||||
image: ${TPOT_REPO}/ciscoasa:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
read_only: true
|
||||
volumes:
|
||||
- ${TPOT_DATA_PATH}/ciscoasa/log:/var/log/ciscoasa
|
||||
|
||||
# Conpot IEC104 service
|
||||
conpot_IEC104:
|
||||
container_name: conpot_iec104
|
||||
restart: always
|
||||
depends_on:
|
||||
tpotinit:
|
||||
condition: service_healthy
|
||||
environment:
|
||||
- CONPOT_CONFIG=/etc/conpot/conpot.cfg
|
||||
- CONPOT_JSON_LOG=/var/log/conpot/conpot_IEC104.json
|
||||
- CONPOT_LOG=/var/log/conpot/conpot_IEC104.log
|
||||
- CONPOT_TEMPLATE=IEC104
|
||||
- CONPOT_TMP=/tmp/conpot
|
||||
tmpfs:
|
||||
- /tmp/conpot:uid=2000,gid=2000
|
||||
networks:
|
||||
- conpot_local_IEC104
|
||||
ports:
|
||||
- "161:161/udp"
|
||||
- "2404:2404"
|
||||
image: ${TPOT_REPO}/conpot:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
read_only: true
|
||||
volumes:
|
||||
- ${TPOT_DATA_PATH}/conpot/log:/var/log/conpot
|
||||
|
||||
# Conpot guardian_ast service
|
||||
conpot_guardian_ast:
|
||||
container_name: conpot_guardian_ast
|
||||
restart: always
|
||||
depends_on:
|
||||
tpotinit:
|
||||
condition: service_healthy
|
||||
environment:
|
||||
- CONPOT_CONFIG=/etc/conpot/conpot.cfg
|
||||
- CONPOT_JSON_LOG=/var/log/conpot/conpot_guardian_ast.json
|
||||
- CONPOT_LOG=/var/log/conpot/conpot_guardian_ast.log
|
||||
- CONPOT_TEMPLATE=guardian_ast
|
||||
- CONPOT_TMP=/tmp/conpot
|
||||
tmpfs:
|
||||
- /tmp/conpot:uid=2000,gid=2000
|
||||
networks:
|
||||
- conpot_local_guardian_ast
|
||||
ports:
|
||||
- "10001:10001"
|
||||
image: ${TPOT_REPO}/conpot:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
read_only: true
|
||||
volumes:
|
||||
- ${TPOT_DATA_PATH}/conpot/log:/var/log/conpot
|
||||
|
||||
# Conpot ipmi
|
||||
conpot_ipmi:
|
||||
container_name: conpot_ipmi
|
||||
restart: always
|
||||
depends_on:
|
||||
tpotinit:
|
||||
condition: service_healthy
|
||||
environment:
|
||||
- CONPOT_CONFIG=/etc/conpot/conpot.cfg
|
||||
- CONPOT_JSON_LOG=/var/log/conpot/conpot_ipmi.json
|
||||
- CONPOT_LOG=/var/log/conpot/conpot_ipmi.log
|
||||
- CONPOT_TEMPLATE=ipmi
|
||||
- CONPOT_TMP=/tmp/conpot
|
||||
tmpfs:
|
||||
- /tmp/conpot:uid=2000,gid=2000
|
||||
networks:
|
||||
- conpot_local_ipmi
|
||||
ports:
|
||||
- "623:623/udp"
|
||||
image: ${TPOT_REPO}/conpot:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
read_only: true
|
||||
volumes:
|
||||
- ${TPOT_DATA_PATH}/conpot/log:/var/log/conpot
|
||||
|
||||
# Conpot kamstrup_382
|
||||
conpot_kamstrup_382:
|
||||
container_name: conpot_kamstrup_382
|
||||
restart: always
|
||||
depends_on:
|
||||
tpotinit:
|
||||
condition: service_healthy
|
||||
environment:
|
||||
- CONPOT_CONFIG=/etc/conpot/conpot.cfg
|
||||
- CONPOT_JSON_LOG=/var/log/conpot/conpot_kamstrup_382.json
|
||||
- CONPOT_LOG=/var/log/conpot/conpot_kamstrup_382.log
|
||||
- CONPOT_TEMPLATE=kamstrup_382
|
||||
- CONPOT_TMP=/tmp/conpot
|
||||
tmpfs:
|
||||
- /tmp/conpot:uid=2000,gid=2000
|
||||
networks:
|
||||
- conpot_local_kamstrup_382
|
||||
ports:
|
||||
- "1025:1025"
|
||||
- "50100:50100"
|
||||
image: ${TPOT_REPO}/conpot:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
read_only: true
|
||||
volumes:
|
||||
- ${TPOT_DATA_PATH}/conpot/log:/var/log/conpot
|
||||
|
||||
# Cowrie service
|
||||
cowrie:
|
||||
container_name: cowrie
|
||||
restart: always
|
||||
depends_on:
|
||||
tpotinit:
|
||||
condition: service_healthy
|
||||
tmpfs:
|
||||
- /tmp/cowrie:uid=2000,gid=2000
|
||||
- /tmp/cowrie/data:uid=2000,gid=2000
|
||||
networks:
|
||||
- cowrie_local
|
||||
ports:
|
||||
- "22:22"
|
||||
- "23:23"
|
||||
image: ${TPOT_REPO}/cowrie:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
read_only: true
|
||||
volumes:
|
||||
- ${TPOT_DATA_PATH}/cowrie/downloads:/home/cowrie/cowrie/dl
|
||||
- ${TPOT_DATA_PATH}/cowrie/keys:/home/cowrie/cowrie/etc
|
||||
- ${TPOT_DATA_PATH}/cowrie/log:/home/cowrie/cowrie/log
|
||||
- ${TPOT_DATA_PATH}/cowrie/log/tty:/home/cowrie/cowrie/log/tty
|
||||
|
||||
# Dicompot service
|
||||
# Get the Horos Client for testing: https://horosproject.org/
|
||||
# Get Dicom images (CC BY 3.0): https://www.cancerimagingarchive.net/collections/
|
||||
# Put images (which must be in Dicom DCM format or it will not work!) into /data/dicompot/images
|
||||
dicompot:
|
||||
container_name: dicompot
|
||||
restart: always
|
||||
depends_on:
|
||||
tpotinit:
|
||||
condition: service_healthy
|
||||
networks:
|
||||
- dicompot_local
|
||||
ports:
|
||||
- "104:11112"
|
||||
- "11112:11112"
|
||||
image: ${TPOT_REPO}/dicompot:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
read_only: true
|
||||
volumes:
|
||||
- ${TPOT_DATA_PATH}/dicompot/log:/var/log/dicompot
|
||||
# - ${TPOT_DATA_PATH}/dicompot/images:/opt/dicompot/images
|
||||
|
||||
# Dionaea service
|
||||
dionaea:
|
||||
container_name: dionaea
|
||||
stdin_open: true
|
||||
tty: true
|
||||
restart: always
|
||||
depends_on:
|
||||
tpotinit:
|
||||
condition: service_healthy
|
||||
networks:
|
||||
- dionaea_local
|
||||
ports:
|
||||
- "20:20"
|
||||
- "21:21"
|
||||
- "42:42"
|
||||
- "69:69/udp"
|
||||
- "81:81"
|
||||
- "135:135"
|
||||
# - "443:443"
|
||||
- "445:445"
|
||||
- "1433:1433"
|
||||
- "1723:1723"
|
||||
- "1883:1883"
|
||||
- "3306:3306"
|
||||
# - "5060:5060"
|
||||
# - "5060:5060/udp"
|
||||
# - "5061:5061"
|
||||
- "27017:27017"
|
||||
image: ${TPOT_REPO}/dionaea:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
read_only: true
|
||||
volumes:
|
||||
- ${TPOT_DATA_PATH}/dionaea/roots/ftp:/opt/dionaea/var/dionaea/roots/ftp
|
||||
- ${TPOT_DATA_PATH}/dionaea/roots/tftp:/opt/dionaea/var/dionaea/roots/tftp
|
||||
- ${TPOT_DATA_PATH}/dionaea/roots/www:/opt/dionaea/var/dionaea/roots/www
|
||||
- ${TPOT_DATA_PATH}/dionaea/roots/upnp:/opt/dionaea/var/dionaea/roots/upnp
|
||||
- ${TPOT_DATA_PATH}/dionaea:/opt/dionaea/var/dionaea
|
||||
- ${TPOT_DATA_PATH}/dionaea/binaries:/opt/dionaea/var/dionaea/binaries
|
||||
- ${TPOT_DATA_PATH}/dionaea/log:/opt/dionaea/var/log
|
||||
- ${TPOT_DATA_PATH}/dionaea/rtp:/opt/dionaea/var/dionaea/rtp
|
||||
|
||||
# ElasticPot service
|
||||
elasticpot:
|
||||
container_name: elasticpot
|
||||
restart: always
|
||||
depends_on:
|
||||
tpotinit:
|
||||
condition: service_healthy
|
||||
networks:
|
||||
- elasticpot_local
|
||||
ports:
|
||||
- "9200:9200"
|
||||
image: ${TPOT_REPO}/elasticpot:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
read_only: true
|
||||
volumes:
|
||||
- ${TPOT_DATA_PATH}/elasticpot/log:/opt/elasticpot/log
|
||||
|
||||
# H0neytr4p service
|
||||
h0neytr4p:
|
||||
container_name: h0neytr4p
|
||||
restart: always
|
||||
depends_on:
|
||||
tpotinit:
|
||||
condition: service_healthy
|
||||
networks:
|
||||
- h0neytr4p_local
|
||||
ports:
|
||||
- "443:443"
|
||||
# - "80:80"
|
||||
image: ${TPOT_REPO}/h0neytr4p:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
read_only: true
|
||||
volumes:
|
||||
- ${TPOT_DATA_PATH}/h0neytr4p/log/:/opt/h0neytr4p/log/
|
||||
- ${TPOT_DATA_PATH}/h0neytr4p/payloads/:/data/h0neytr4p/payloads/
|
||||
|
||||
# Heralding service
|
||||
heralding:
|
||||
container_name: heralding
|
||||
restart: always
|
||||
depends_on:
|
||||
tpotinit:
|
||||
condition: service_healthy
|
||||
tmpfs:
|
||||
- /tmp/heralding:uid=2000,gid=2000
|
||||
networks:
|
||||
- heralding_local
|
||||
ports:
|
||||
# - "21:21"
|
||||
# - "22:22"
|
||||
# - "23:23"
|
||||
# - "25:25"
|
||||
# - "80:80"
|
||||
- "110:110"
|
||||
- "143:143"
|
||||
# - "443:443"
|
||||
- "465:465"
|
||||
- "993:993"
|
||||
- "995:995"
|
||||
# - "3306:3306"
|
||||
# - "3389:3389"
|
||||
- "1080:1080"
|
||||
- "5432:5432"
|
||||
- "5900:5900"
|
||||
image: ${TPOT_REPO}/heralding:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
read_only: true
|
||||
volumes:
|
||||
- ${TPOT_DATA_PATH}/heralding/log:/var/log/heralding
|
||||
|
||||
# Honeyaml service
|
||||
honeyaml:
|
||||
container_name: honeyaml
|
||||
restart: always
|
||||
depends_on:
|
||||
tpotinit:
|
||||
condition: service_healthy
|
||||
networks:
|
||||
- honeyaml_local
|
||||
ports:
|
||||
- "3000:8080"
|
||||
image: ${TPOT_REPO}/honeyaml:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
read_only: true
|
||||
volumes:
|
||||
- ${TPOT_DATA_PATH}/honeyaml/log:/opt/honeyaml/log/
|
||||
|
||||
# Honeytrap service
|
||||
honeytrap:
|
||||
container_name: honeytrap
|
||||
restart: always
|
||||
depends_on:
|
||||
tpotinit:
|
||||
condition: service_healthy
|
||||
tmpfs:
|
||||
- /tmp/honeytrap:uid=2000,gid=2000
|
||||
network_mode: "host"
|
||||
cap_add:
|
||||
- NET_ADMIN
|
||||
image: ${TPOT_REPO}/honeytrap:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
read_only: true
|
||||
volumes:
|
||||
- ${TPOT_DATA_PATH}/honeytrap/attacks:/opt/honeytrap/var/attacks
|
||||
- ${TPOT_DATA_PATH}/honeytrap/downloads:/opt/honeytrap/var/downloads
|
||||
- ${TPOT_DATA_PATH}/honeytrap/log:/opt/honeytrap/var/log
|
||||
|
||||
# Ipphoney service
|
||||
ipphoney:
|
||||
container_name: ipphoney
|
||||
restart: always
|
||||
depends_on:
|
||||
tpotinit:
|
||||
condition: service_healthy
|
||||
networks:
|
||||
- ipphoney_local
|
||||
ports:
|
||||
- "631:631"
|
||||
image: ${TPOT_REPO}/ipphoney:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
read_only: true
|
||||
volumes:
|
||||
- ${TPOT_DATA_PATH}/ipphoney/log:/opt/ipphoney/log
|
||||
|
||||
# Mailoney service
|
||||
mailoney:
|
||||
container_name: mailoney
|
||||
stdin_open: true
|
||||
tty: true
|
||||
restart: always
|
||||
depends_on:
|
||||
tpotinit:
|
||||
condition: service_healthy
|
||||
networks:
|
||||
- mailoney_local
|
||||
ports:
|
||||
- "25:25"
|
||||
- "587:25"
|
||||
image: ${TPOT_REPO}/mailoney:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
read_only: true
|
||||
volumes:
|
||||
- ${TPOT_DATA_PATH}/mailoney/log:/opt/mailoney/logs
|
||||
|
||||
# Medpot service
|
||||
medpot:
|
||||
container_name: medpot
|
||||
restart: always
|
||||
depends_on:
|
||||
tpotinit:
|
||||
condition: service_healthy
|
||||
networks:
|
||||
- medpot_local
|
||||
ports:
|
||||
- "2575:2575"
|
||||
image: ${TPOT_REPO}/medpot:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
read_only: true
|
||||
volumes:
|
||||
- ${TPOT_DATA_PATH}/medpot/log/:/var/log/medpot
|
||||
|
||||
# Miniprint service
|
||||
miniprint:
|
||||
container_name: miniprint
|
||||
restart: always
|
||||
depends_on:
|
||||
tpotinit:
|
||||
condition: service_healthy
|
||||
networks:
|
||||
- miniprint_local
|
||||
ports:
|
||||
- "9100:9100"
|
||||
image: ${TPOT_REPO}/miniprint:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
read_only: true
|
||||
volumes:
|
||||
- ${TPOT_DATA_PATH}/miniprint/log/:/opt/miniprint/log/
|
||||
- ${TPOT_DATA_PATH}/miniprint/uploads/:/opt/miniprint/uploads/
|
||||
|
||||
# Redishoneypot service
|
||||
redishoneypot:
|
||||
container_name: redishoneypot
|
||||
restart: always
|
||||
depends_on:
|
||||
tpotinit:
|
||||
condition: service_healthy
|
||||
networks:
|
||||
- redishoneypot_local
|
||||
ports:
|
||||
- "6379:6379"
|
||||
image: ${TPOT_REPO}/redishoneypot:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
read_only: true
|
||||
volumes:
|
||||
- ${TPOT_DATA_PATH}/redishoneypot/log:/var/log/redishoneypot
|
||||
|
||||
# SentryPeer service
|
||||
sentrypeer:
|
||||
container_name: sentrypeer
|
||||
restart: always
|
||||
depends_on:
|
||||
tpotinit:
|
||||
condition: service_healthy
|
||||
# environment:
|
||||
# - SENTRYPEER_PEER_TO_PEER=1
|
||||
networks:
|
||||
- sentrypeer_local
|
||||
ports:
|
||||
# - "4222:4222/udp"
|
||||
- "5060:5060/tcp"
|
||||
- "5060:5060/udp"
|
||||
# - "127.0.0.1:8082:8082"
|
||||
image: ${TPOT_REPO}/sentrypeer:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
read_only: true
|
||||
volumes:
|
||||
- ${TPOT_DATA_PATH}/sentrypeer/log:/var/log/sentrypeer
|
||||
|
||||
#### Snare / Tanner
|
||||
## Tanner Redis Service
|
||||
tanner_redis:
|
||||
container_name: tanner_redis
|
||||
restart: always
|
||||
depends_on:
|
||||
tpotinit:
|
||||
condition: service_healthy
|
||||
tty: true
|
||||
networks:
|
||||
- tanner_local
|
||||
image: ${TPOT_REPO}/redis:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
read_only: true
|
||||
|
||||
## PHP Sandbox service
|
||||
tanner_phpox:
|
||||
container_name: tanner_phpox
|
||||
restart: always
|
||||
depends_on:
|
||||
tpotinit:
|
||||
condition: service_healthy
|
||||
tty: true
|
||||
networks:
|
||||
- tanner_local
|
||||
image: ${TPOT_REPO}/phpox:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
read_only: true
|
||||
|
||||
## Tanner API Service
|
||||
tanner_api:
|
||||
container_name: tanner_api
|
||||
restart: always
|
||||
depends_on:
|
||||
- tanner_redis
|
||||
tmpfs:
|
||||
- /tmp/tanner:uid=2000,gid=2000
|
||||
tty: true
|
||||
networks:
|
||||
- tanner_local
|
||||
image: ${TPOT_REPO}/tanner:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
read_only: true
|
||||
volumes:
|
||||
- ${TPOT_DATA_PATH}/tanner/log:/var/log/tanner
|
||||
command: tannerapi
|
||||
|
||||
## Tanner Service
|
||||
tanner:
|
||||
container_name: tanner
|
||||
restart: always
|
||||
depends_on:
|
||||
- tanner_api
|
||||
- tanner_phpox
|
||||
tmpfs:
|
||||
- /tmp/tanner:uid=2000,gid=2000
|
||||
tty: true
|
||||
networks:
|
||||
- tanner_local
|
||||
image: ${TPOT_REPO}/tanner:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
command: tanner
|
||||
read_only: true
|
||||
volumes:
|
||||
- ${TPOT_DATA_PATH}/tanner/log:/var/log/tanner
|
||||
- ${TPOT_DATA_PATH}/tanner/files:/opt/tanner/files
|
||||
|
||||
## Snare Service
|
||||
snare:
|
||||
container_name: snare
|
||||
restart: always
|
||||
depends_on:
|
||||
- tanner
|
||||
tty: true
|
||||
networks:
|
||||
- tanner_local
|
||||
ports:
|
||||
- "80:80"
|
||||
image: ${TPOT_REPO}/snare:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
|
||||
# Wordpot service
|
||||
wordpot:
|
||||
container_name: wordpot
|
||||
restart: always
|
||||
depends_on:
|
||||
tpotinit:
|
||||
condition: service_healthy
|
||||
networks:
|
||||
- wordpot_local
|
||||
ports:
|
||||
- "8080:80"
|
||||
image: ${TPOT_REPO}/wordpot:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
read_only: true
|
||||
volumes:
|
||||
- ${TPOT_DATA_PATH}/wordpot/log:/opt/wordpot/logs/
|
||||
|
||||
|
||||
##################
|
||||
#### NSM
|
||||
##################
|
||||
|
||||
# Fatt service
|
||||
fatt:
|
||||
container_name: fatt
|
||||
restart: always
|
||||
depends_on:
|
||||
tpotinit:
|
||||
condition: service_healthy
|
||||
network_mode: "host"
|
||||
cap_add:
|
||||
- NET_ADMIN
|
||||
- SYS_NICE
|
||||
- NET_RAW
|
||||
image: ${TPOT_REPO}/fatt:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
volumes:
|
||||
- ${TPOT_DATA_PATH}/fatt/log:/opt/fatt/log
|
||||
|
||||
# P0f service
|
||||
p0f:
|
||||
container_name: p0f
|
||||
restart: always
|
||||
depends_on:
|
||||
tpotinit:
|
||||
condition: service_healthy
|
||||
network_mode: "host"
|
||||
image: ${TPOT_REPO}/p0f:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
read_only: true
|
||||
volumes:
|
||||
- ${TPOT_DATA_PATH}/p0f/log:/var/log/p0f
|
||||
|
||||
# Suricata service
|
||||
suricata:
|
||||
container_name: suricata
|
||||
restart: always
|
||||
depends_on:
|
||||
tpotinit:
|
||||
condition: service_healthy
|
||||
environment:
|
||||
- OINKCODE=${OINKCODE:-OPEN} # Default to OPEN if unset or NULL (value provided by T-Pot .env)
|
||||
# Loading external Rules from URL
|
||||
# - FROMURL="https://username:password@yoururl.com|https://username:password@otherurl.com"
|
||||
network_mode: "host"
|
||||
cap_add:
|
||||
- NET_ADMIN
|
||||
- SYS_NICE
|
||||
- NET_RAW
|
||||
image: ${TPOT_REPO}/suricata:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
volumes:
|
||||
- ${TPOT_DATA_PATH}/suricata/log:/var/log/suricata
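The OINKCODE variable above falls back to OPEN and, per the comment, is normally supplied through the T-Pot .env; FROMURL is the commented alternative for pulling rules from external URLs. A minimal sketch of the .env side, assuming the file lives at ~/tpotce/.env (the oinkcode value is a placeholder):

# ~/tpotce/.env
OINKCODE=OPEN      # default; replace with a registered oinkcode if you have one
# To use external rule URLs instead, uncomment FROMURL in the suricata service above, e.g.:
# - FROMURL="https://username:password@yoururl.com"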
|
||||
|
||||
|
||||
##################
|
||||
#### Tools
|
||||
##################
|
||||
|
||||
#### ELK
|
||||
|
||||
## Logstash service
|
||||
logstash:
|
||||
container_name: logstash
|
||||
restart: always
|
||||
depends_on:
|
||||
tpotinit:
|
||||
condition: service_healthy
|
||||
environment:
|
||||
- LS_JAVA_OPTS=-Xms1024m -Xmx1024m
|
||||
- TPOT_TYPE=${TPOT_TYPE:-HIVE}
|
||||
- TPOT_HIVE_USER=${TPOT_HIVE_USER}
|
||||
- TPOT_HIVE_IP=${TPOT_HIVE_IP}
|
||||
- LS_SSL_VERIFICATION=${LS_SSL_VERIFICATION:-full}
|
||||
ports:
|
||||
- "127.0.0.1:64305:64305"
|
||||
mem_limit: 2g
|
||||
image: ${TPOT_REPO}/logstash:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
volumes:
|
||||
- ${TPOT_DATA_PATH}:/data
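The Logstash environment above is what switches a T-Pot between keeping logs locally and forwarding them to a central HIVE: TPOT_TYPE defaults to HIVE, while TPOT_HIVE_USER and TPOT_HIVE_IP describe the upstream when the instance acts as a SENSOR. A minimal sketch of the sensor-side values, assuming they are set in ~/tpotce/.env; the IP, credentials and the relaxed SSL setting are placeholders/assumptions, not values from this repository:

# ~/tpotce/.env on a SENSOR that forwards logs to a HIVE
TPOT_TYPE=SENSOR
TPOT_HIVE_IP=192.0.2.10                # placeholder HIVE address
TPOT_HIVE_USER=c2Vuc29yOnNlY3JldA==    # base64 of "sensor:secret" (placeholder; deploy.sh below generates this)
LS_SSL_VERIFICATION=none               # assumption: only relax this for self-signed HIVE certificates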
|
||||
|
||||
#### /ELK
|
||||
|
||||
# Ewsposter service
|
||||
ewsposter:
|
||||
container_name: ewsposter
|
||||
restart: always
|
||||
depends_on:
|
||||
tpotinit:
|
||||
condition: service_healthy
|
||||
networks:
|
||||
- ewsposter_local
|
||||
environment:
|
||||
- EWS_HPFEEDS_ENABLE=false
|
||||
- EWS_HPFEEDS_HOST=host
|
||||
- EWS_HPFEEDS_PORT=port
|
||||
- EWS_HPFEEDS_CHANNELS=channels
|
||||
- EWS_HPFEEDS_IDENT=user
|
||||
- EWS_HPFEEDS_SECRET=secret
|
||||
- EWS_HPFEEDS_TLSCERT=false
|
||||
- EWS_HPFEEDS_FORMAT=json
|
||||
image: ${TPOT_REPO}/ewsposter:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
volumes:
|
||||
- ${TPOT_DATA_PATH}:/data
|
||||
- ${TPOT_DATA_PATH}/ews/conf/ews.ip:/opt/ewsposter/ews.ip
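Everything in this file hangs off tpotinit: the honeypots, NSM tools and ewsposter only start once it reports healthy, and all image references are resolved from the .env (TPOT_REPO, TPOT_VERSION, TPOT_PULL_POLICY). A minimal sketch of driving the file manually with docker compose, assuming a ~/tpotce checkout; T-Pot normally manages this through its own service, so this is only for inspection and testing:

cd ~/tpotce
docker compose config -q     # validate the file and the variable substitution from .env
docker compose up -d         # tpotinit must become healthy before the dependent services come up
docker compose logs -f tpotinit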
|
||||
|
|
@@ -1,856 +0,0 @@
|
|||
# T-Pot: STANDARD
|
||||
networks:
|
||||
adbhoney_local:
|
||||
ciscoasa_local:
|
||||
conpot_local_IEC104:
|
||||
conpot_local_guardian_ast:
|
||||
conpot_local_ipmi:
|
||||
conpot_local_kamstrup_382:
|
||||
cowrie_local:
|
||||
dicompot_local:
|
||||
dionaea_local:
|
||||
elasticpot_local:
|
||||
h0neytr4p_local:
|
||||
heralding_local:
|
||||
honeyaml_local:
|
||||
ipphoney_local:
|
||||
mailoney_local:
|
||||
medpot_local:
|
||||
miniprint_local:
|
||||
redishoneypot_local:
|
||||
sentrypeer_local:
|
||||
tanner_local:
|
||||
wordpot_local:
|
||||
nginx_local:
|
||||
ewsposter_local:
|
||||
|
||||
services:
|
||||
|
||||
#########################################
|
||||
#### DEV
|
||||
#########################################
|
||||
#### T-Pot Init - Never delete this!
|
||||
#########################################
|
||||
|
||||
# T-Pot Init Service
|
||||
tpotinit:
|
||||
container_name: tpotinit
|
||||
env_file:
|
||||
- .env
|
||||
restart: always
|
||||
stop_grace_period: 60s
|
||||
tmpfs:
|
||||
- /tmp/etc:uid=2000,gid=2000
|
||||
- /tmp/:uid=2000,gid=2000
|
||||
network_mode: "host"
|
||||
cap_add:
|
||||
- NET_ADMIN
|
||||
image: ${TPOT_REPO}/tpotinit:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
volumes:
|
||||
- ${TPOT_DOCKER_COMPOSE}:/tmp/tpot/docker-compose.yml:ro
|
||||
- ${TPOT_DATA_PATH}/blackhole:/etc/blackhole
|
||||
- ${TPOT_DATA_PATH}:/data
|
||||
- /var/run/docker.sock:/var/run/docker.sock:ro
|
||||
|
||||
|
||||
##################
|
||||
#### Honeypots
|
||||
##################
|
||||
|
||||
# Adbhoney service
|
||||
adbhoney:
|
||||
container_name: adbhoney
|
||||
restart: always
|
||||
depends_on:
|
||||
tpotinit:
|
||||
condition: service_healthy
|
||||
networks:
|
||||
- adbhoney_local
|
||||
ports:
|
||||
- "5555:5555"
|
||||
image: ${TPOT_REPO}/adbhoney:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
read_only: true
|
||||
volumes:
|
||||
- ${TPOT_DATA_PATH}/adbhoney/log:/opt/adbhoney/log
|
||||
- ${TPOT_DATA_PATH}/adbhoney/downloads:/opt/adbhoney/dl
|
||||
|
||||
# Ciscoasa service
|
||||
ciscoasa:
|
||||
container_name: ciscoasa
|
||||
restart: always
|
||||
depends_on:
|
||||
tpotinit:
|
||||
condition: service_healthy
|
||||
tmpfs:
|
||||
- /tmp/ciscoasa:uid=2000,gid=2000
|
||||
networks:
|
||||
- ciscoasa_local
|
||||
ports:
|
||||
- "5000:5000/udp"
|
||||
- "8443:8443"
|
||||
image: ${TPOT_REPO}/ciscoasa:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
read_only: true
|
||||
volumes:
|
||||
- ${TPOT_DATA_PATH}/ciscoasa/log:/var/log/ciscoasa
|
||||
|
||||
# Conpot IEC104 service
|
||||
conpot_IEC104:
|
||||
container_name: conpot_iec104
|
||||
restart: always
|
||||
depends_on:
|
||||
tpotinit:
|
||||
condition: service_healthy
|
||||
environment:
|
||||
- CONPOT_CONFIG=/etc/conpot/conpot.cfg
|
||||
- CONPOT_JSON_LOG=/var/log/conpot/conpot_IEC104.json
|
||||
- CONPOT_LOG=/var/log/conpot/conpot_IEC104.log
|
||||
- CONPOT_TEMPLATE=IEC104
|
||||
- CONPOT_TMP=/tmp/conpot
|
||||
tmpfs:
|
||||
- /tmp/conpot:uid=2000,gid=2000
|
||||
networks:
|
||||
- conpot_local_IEC104
|
||||
ports:
|
||||
- "161:161/udp"
|
||||
- "2404:2404"
|
||||
image: ${TPOT_REPO}/conpot:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
read_only: true
|
||||
volumes:
|
||||
- ${TPOT_DATA_PATH}/conpot/log:/var/log/conpot
|
||||
|
||||
# Conpot guardian_ast service
|
||||
conpot_guardian_ast:
|
||||
container_name: conpot_guardian_ast
|
||||
restart: always
|
||||
depends_on:
|
||||
tpotinit:
|
||||
condition: service_healthy
|
||||
environment:
|
||||
- CONPOT_CONFIG=/etc/conpot/conpot.cfg
|
||||
- CONPOT_JSON_LOG=/var/log/conpot/conpot_guardian_ast.json
|
||||
- CONPOT_LOG=/var/log/conpot/conpot_guardian_ast.log
|
||||
- CONPOT_TEMPLATE=guardian_ast
|
||||
- CONPOT_TMP=/tmp/conpot
|
||||
tmpfs:
|
||||
- /tmp/conpot:uid=2000,gid=2000
|
||||
networks:
|
||||
- conpot_local_guardian_ast
|
||||
ports:
|
||||
- "10001:10001"
|
||||
image: ${TPOT_REPO}/conpot:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
read_only: true
|
||||
volumes:
|
||||
- ${TPOT_DATA_PATH}/conpot/log:/var/log/conpot
|
||||
|
||||
# Conpot ipmi
|
||||
conpot_ipmi:
|
||||
container_name: conpot_ipmi
|
||||
restart: always
|
||||
depends_on:
|
||||
tpotinit:
|
||||
condition: service_healthy
|
||||
environment:
|
||||
- CONPOT_CONFIG=/etc/conpot/conpot.cfg
|
||||
- CONPOT_JSON_LOG=/var/log/conpot/conpot_ipmi.json
|
||||
- CONPOT_LOG=/var/log/conpot/conpot_ipmi.log
|
||||
- CONPOT_TEMPLATE=ipmi
|
||||
- CONPOT_TMP=/tmp/conpot
|
||||
tmpfs:
|
||||
- /tmp/conpot:uid=2000,gid=2000
|
||||
networks:
|
||||
- conpot_local_ipmi
|
||||
ports:
|
||||
- "623:623/udp"
|
||||
image: ${TPOT_REPO}/conpot:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
read_only: true
|
||||
volumes:
|
||||
- ${TPOT_DATA_PATH}/conpot/log:/var/log/conpot
|
||||
|
||||
# Conpot kamstrup_382
|
||||
conpot_kamstrup_382:
|
||||
container_name: conpot_kamstrup_382
|
||||
restart: always
|
||||
depends_on:
|
||||
tpotinit:
|
||||
condition: service_healthy
|
||||
environment:
|
||||
- CONPOT_CONFIG=/etc/conpot/conpot.cfg
|
||||
- CONPOT_JSON_LOG=/var/log/conpot/conpot_kamstrup_382.json
|
||||
- CONPOT_LOG=/var/log/conpot/conpot_kamstrup_382.log
|
||||
- CONPOT_TEMPLATE=kamstrup_382
|
||||
- CONPOT_TMP=/tmp/conpot
|
||||
tmpfs:
|
||||
- /tmp/conpot:uid=2000,gid=2000
|
||||
networks:
|
||||
- conpot_local_kamstrup_382
|
||||
ports:
|
||||
- "1025:1025"
|
||||
- "50100:50100"
|
||||
image: ${TPOT_REPO}/conpot:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
read_only: true
|
||||
volumes:
|
||||
- ${TPOT_DATA_PATH}/conpot/log:/var/log/conpot
|
||||
|
||||
# Cowrie service
|
||||
cowrie:
|
||||
container_name: cowrie
|
||||
restart: always
|
||||
depends_on:
|
||||
tpotinit:
|
||||
condition: service_healthy
|
||||
tmpfs:
|
||||
- /tmp/cowrie:uid=2000,gid=2000
|
||||
- /tmp/cowrie/data:uid=2000,gid=2000
|
||||
networks:
|
||||
- cowrie_local
|
||||
ports:
|
||||
- "22:22"
|
||||
- "23:23"
|
||||
image: ${TPOT_REPO}/cowrie:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
read_only: true
|
||||
volumes:
|
||||
- ${TPOT_DATA_PATH}/cowrie/downloads:/home/cowrie/cowrie/dl
|
||||
- ${TPOT_DATA_PATH}/cowrie/keys:/home/cowrie/cowrie/etc
|
||||
- ${TPOT_DATA_PATH}/cowrie/log:/home/cowrie/cowrie/log
|
||||
- ${TPOT_DATA_PATH}/cowrie/log/tty:/home/cowrie/cowrie/log/tty
|
||||
|
||||
# Dicompot service
|
||||
# Get the Horos Client for testing: https://horosproject.org/
|
||||
# Get Dicom images (CC BY 3.0): https://www.cancerimagingarchive.net/collections/
|
||||
# Put images (which must be in Dicom DCM format or it will not work!) into /data/dicompot/images
|
||||
dicompot:
|
||||
container_name: dicompot
|
||||
restart: always
|
||||
depends_on:
|
||||
tpotinit:
|
||||
condition: service_healthy
|
||||
networks:
|
||||
- dicompot_local
|
||||
ports:
|
||||
- "104:11112"
|
||||
- "11112:11112"
|
||||
image: ${TPOT_REPO}/dicompot:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
read_only: true
|
||||
volumes:
|
||||
- ${TPOT_DATA_PATH}/dicompot/log:/var/log/dicompot
|
||||
# - ${TPOT_DATA_PATH}/dicompot/images:/opt/dicompot/images
|
||||
|
||||
# Dionaea service
|
||||
dionaea:
|
||||
container_name: dionaea
|
||||
stdin_open: true
|
||||
tty: true
|
||||
restart: always
|
||||
depends_on:
|
||||
tpotinit:
|
||||
condition: service_healthy
|
||||
networks:
|
||||
- dionaea_local
|
||||
ports:
|
||||
- "20:20"
|
||||
- "21:21"
|
||||
- "42:42"
|
||||
- "69:69/udp"
|
||||
- "81:81"
|
||||
- "135:135"
|
||||
# - "443:443"
|
||||
- "445:445"
|
||||
- "1433:1433"
|
||||
- "1723:1723"
|
||||
- "1883:1883"
|
||||
- "3306:3306"
|
||||
# - "5060:5060"
|
||||
# - "5060:5060/udp"
|
||||
# - "5061:5061"
|
||||
- "27017:27017"
|
||||
image: ${TPOT_REPO}/dionaea:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
read_only: true
|
||||
volumes:
|
||||
- ${TPOT_DATA_PATH}/dionaea/roots/ftp:/opt/dionaea/var/dionaea/roots/ftp
|
||||
- ${TPOT_DATA_PATH}/dionaea/roots/tftp:/opt/dionaea/var/dionaea/roots/tftp
|
||||
- ${TPOT_DATA_PATH}/dionaea/roots/www:/opt/dionaea/var/dionaea/roots/www
|
||||
- ${TPOT_DATA_PATH}/dionaea/roots/upnp:/opt/dionaea/var/dionaea/roots/upnp
|
||||
- ${TPOT_DATA_PATH}/dionaea:/opt/dionaea/var/dionaea
|
||||
- ${TPOT_DATA_PATH}/dionaea/binaries:/opt/dionaea/var/dionaea/binaries
|
||||
- ${TPOT_DATA_PATH}/dionaea/log:/opt/dionaea/var/log
|
||||
- ${TPOT_DATA_PATH}/dionaea/rtp:/opt/dionaea/var/dionaea/rtp
|
||||
|
||||
# ElasticPot service
|
||||
elasticpot:
|
||||
container_name: elasticpot
|
||||
restart: always
|
||||
depends_on:
|
||||
tpotinit:
|
||||
condition: service_healthy
|
||||
networks:
|
||||
- elasticpot_local
|
||||
ports:
|
||||
- "9200:9200"
|
||||
image: ${TPOT_REPO}/elasticpot:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
read_only: true
|
||||
volumes:
|
||||
- ${TPOT_DATA_PATH}/elasticpot/log:/opt/elasticpot/log
|
||||
|
||||
# H0neytr4p service
|
||||
h0neytr4p:
|
||||
container_name: h0neytr4p
|
||||
restart: always
|
||||
depends_on:
|
||||
tpotinit:
|
||||
condition: service_healthy
|
||||
networks:
|
||||
- h0neytr4p_local
|
||||
ports:
|
||||
- "443:443"
|
||||
# - "80:80"
|
||||
image: ${TPOT_REPO}/h0neytr4p:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
read_only: true
|
||||
volumes:
|
||||
- ${TPOT_DATA_PATH}/h0neytr4p/log/:/opt/h0neytr4p/log/
|
||||
- ${TPOT_DATA_PATH}/h0neytr4p/payloads/:/data/h0neytr4p/payloads/
|
||||
|
||||
# Heralding service
|
||||
heralding:
|
||||
container_name: heralding
|
||||
restart: always
|
||||
depends_on:
|
||||
tpotinit:
|
||||
condition: service_healthy
|
||||
tmpfs:
|
||||
- /tmp/heralding:uid=2000,gid=2000
|
||||
networks:
|
||||
- heralding_local
|
||||
ports:
|
||||
# - "21:21"
|
||||
# - "22:22"
|
||||
# - "23:23"
|
||||
# - "25:25"
|
||||
# - "80:80"
|
||||
- "110:110"
|
||||
- "143:143"
|
||||
# - "443:443"
|
||||
- "465:465"
|
||||
- "993:993"
|
||||
- "995:995"
|
||||
# - "3306:3306"
|
||||
# - "3389:3389"
|
||||
- "1080:1080"
|
||||
- "5432:5432"
|
||||
- "5900:5900"
|
||||
image: ${TPOT_REPO}/heralding:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
read_only: true
|
||||
volumes:
|
||||
- ${TPOT_DATA_PATH}/heralding/log:/var/log/heralding
|
||||
|
||||
# Honeyaml service
|
||||
honeyaml:
|
||||
container_name: honeyaml
|
||||
restart: always
|
||||
depends_on:
|
||||
tpotinit:
|
||||
condition: service_healthy
|
||||
networks:
|
||||
- honeyaml_local
|
||||
ports:
|
||||
- "3000:8080"
|
||||
image: ${TPOT_REPO}/honeyaml:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
read_only: true
|
||||
volumes:
|
||||
- ${TPOT_DATA_PATH}/honeyaml/log:/opt/honeyaml/log/
|
||||
|
||||
# Honeytrap service
|
||||
honeytrap:
|
||||
container_name: honeytrap
|
||||
restart: always
|
||||
depends_on:
|
||||
tpotinit:
|
||||
condition: service_healthy
|
||||
tmpfs:
|
||||
- /tmp/honeytrap:uid=2000,gid=2000
|
||||
network_mode: "host"
|
||||
cap_add:
|
||||
- NET_ADMIN
|
||||
image: ${TPOT_REPO}/honeytrap:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
read_only: true
|
||||
volumes:
|
||||
- ${TPOT_DATA_PATH}/honeytrap/attacks:/opt/honeytrap/var/attacks
|
||||
- ${TPOT_DATA_PATH}/honeytrap/downloads:/opt/honeytrap/var/downloads
|
||||
- ${TPOT_DATA_PATH}/honeytrap/log:/opt/honeytrap/var/log
|
||||
|
||||
# Ipphoney service
|
||||
ipphoney:
|
||||
container_name: ipphoney
|
||||
restart: always
|
||||
depends_on:
|
||||
tpotinit:
|
||||
condition: service_healthy
|
||||
networks:
|
||||
- ipphoney_local
|
||||
ports:
|
||||
- "631:631"
|
||||
image: ${TPOT_REPO}/ipphoney:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
read_only: true
|
||||
volumes:
|
||||
- ${TPOT_DATA_PATH}/ipphoney/log:/opt/ipphoney/log
|
||||
|
||||
# Mailoney service
|
||||
mailoney:
|
||||
container_name: mailoney
|
||||
stdin_open: true
|
||||
tty: true
|
||||
restart: always
|
||||
depends_on:
|
||||
tpotinit:
|
||||
condition: service_healthy
|
||||
networks:
|
||||
- mailoney_local
|
||||
ports:
|
||||
- "25:25"
|
||||
- "587:25"
|
||||
image: ${TPOT_REPO}/mailoney:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
read_only: true
|
||||
volumes:
|
||||
- ${TPOT_DATA_PATH}/mailoney/log:/opt/mailoney/logs
|
||||
|
||||
# Medpot service
|
||||
medpot:
|
||||
container_name: medpot
|
||||
restart: always
|
||||
depends_on:
|
||||
tpotinit:
|
||||
condition: service_healthy
|
||||
networks:
|
||||
- medpot_local
|
||||
ports:
|
||||
- "2575:2575"
|
||||
image: ${TPOT_REPO}/medpot:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
read_only: true
|
||||
volumes:
|
||||
- ${TPOT_DATA_PATH}/medpot/log/:/var/log/medpot
|
||||
|
||||
# Miniprint service
|
||||
miniprint:
|
||||
container_name: miniprint
|
||||
restart: always
|
||||
depends_on:
|
||||
tpotinit:
|
||||
condition: service_healthy
|
||||
networks:
|
||||
- miniprint_local
|
||||
ports:
|
||||
- "9100:9100"
|
||||
image: ${TPOT_REPO}/miniprint:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
read_only: true
|
||||
volumes:
|
||||
- ${TPOT_DATA_PATH}/miniprint/log/:/opt/miniprint/log/
|
||||
- ${TPOT_DATA_PATH}/miniprint/uploads/:/opt/miniprint/uploads/
|
||||
|
||||
# Redishoneypot service
|
||||
redishoneypot:
|
||||
container_name: redishoneypot
|
||||
restart: always
|
||||
depends_on:
|
||||
tpotinit:
|
||||
condition: service_healthy
|
||||
networks:
|
||||
- redishoneypot_local
|
||||
ports:
|
||||
- "6379:6379"
|
||||
image: ${TPOT_REPO}/redishoneypot:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
read_only: true
|
||||
volumes:
|
||||
- ${TPOT_DATA_PATH}/redishoneypot/log:/var/log/redishoneypot
|
||||
|
||||
# SentryPeer service
|
||||
sentrypeer:
|
||||
container_name: sentrypeer
|
||||
restart: always
|
||||
depends_on:
|
||||
tpotinit:
|
||||
condition: service_healthy
|
||||
# environment:
|
||||
# - SENTRYPEER_PEER_TO_PEER=1
|
||||
networks:
|
||||
- sentrypeer_local
|
||||
ports:
|
||||
# - "4222:4222/udp"
|
||||
- "5060:5060/tcp"
|
||||
- "5060:5060/udp"
|
||||
# - "127.0.0.1:8082:8082"
|
||||
image: ${TPOT_REPO}/sentrypeer:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
read_only: true
|
||||
volumes:
|
||||
- ${TPOT_DATA_PATH}/sentrypeer/log:/var/log/sentrypeer
|
||||
|
||||
#### Snare / Tanner
|
||||
## Tanner Redis Service
|
||||
tanner_redis:
|
||||
container_name: tanner_redis
|
||||
restart: always
|
||||
depends_on:
|
||||
tpotinit:
|
||||
condition: service_healthy
|
||||
tty: true
|
||||
networks:
|
||||
- tanner_local
|
||||
image: ${TPOT_REPO}/redis:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
read_only: true
|
||||
|
||||
## PHP Sandbox service
|
||||
tanner_phpox:
|
||||
container_name: tanner_phpox
|
||||
restart: always
|
||||
depends_on:
|
||||
tpotinit:
|
||||
condition: service_healthy
|
||||
tty: true
|
||||
networks:
|
||||
- tanner_local
|
||||
image: ${TPOT_REPO}/phpox:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
read_only: true
|
||||
|
||||
## Tanner API Service
|
||||
tanner_api:
|
||||
container_name: tanner_api
|
||||
restart: always
|
||||
depends_on:
|
||||
- tanner_redis
|
||||
tmpfs:
|
||||
- /tmp/tanner:uid=2000,gid=2000
|
||||
tty: true
|
||||
networks:
|
||||
- tanner_local
|
||||
image: ${TPOT_REPO}/tanner:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
read_only: true
|
||||
volumes:
|
||||
- ${TPOT_DATA_PATH}/tanner/log:/var/log/tanner
|
||||
command: tannerapi
|
||||
|
||||
## Tanner Service
|
||||
tanner:
|
||||
container_name: tanner
|
||||
restart: always
|
||||
depends_on:
|
||||
- tanner_api
|
||||
- tanner_phpox
|
||||
tmpfs:
|
||||
- /tmp/tanner:uid=2000,gid=2000
|
||||
tty: true
|
||||
networks:
|
||||
- tanner_local
|
||||
image: ${TPOT_REPO}/tanner:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
command: tanner
|
||||
read_only: true
|
||||
volumes:
|
||||
- ${TPOT_DATA_PATH}/tanner/log:/var/log/tanner
|
||||
- ${TPOT_DATA_PATH}/tanner/files:/opt/tanner/files
|
||||
|
||||
## Snare Service
|
||||
snare:
|
||||
container_name: snare
|
||||
restart: always
|
||||
depends_on:
|
||||
- tanner
|
||||
tty: true
|
||||
networks:
|
||||
- tanner_local
|
||||
ports:
|
||||
- "80:80"
|
||||
image: ${TPOT_REPO}/snare:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
|
||||
# Wordpot service
|
||||
wordpot:
|
||||
container_name: wordpot
|
||||
restart: always
|
||||
depends_on:
|
||||
tpotinit:
|
||||
condition: service_healthy
|
||||
networks:
|
||||
- wordpot_local
|
||||
ports:
|
||||
- "8080:80"
|
||||
image: ${TPOT_REPO}/wordpot:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
read_only: true
|
||||
volumes:
|
||||
- ${TPOT_DATA_PATH}/wordpot/log:/opt/wordpot/logs/
|
||||
|
||||
|
||||
##################
|
||||
#### NSM
|
||||
##################
|
||||
|
||||
# Fatt service
|
||||
fatt:
|
||||
container_name: fatt
|
||||
restart: always
|
||||
depends_on:
|
||||
tpotinit:
|
||||
condition: service_healthy
|
||||
network_mode: "host"
|
||||
cap_add:
|
||||
- NET_ADMIN
|
||||
- SYS_NICE
|
||||
- NET_RAW
|
||||
image: ${TPOT_REPO}/fatt:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
volumes:
|
||||
- ${TPOT_DATA_PATH}/fatt/log:/opt/fatt/log
|
||||
|
||||
# P0f service
|
||||
p0f:
|
||||
container_name: p0f
|
||||
restart: always
|
||||
depends_on:
|
||||
tpotinit:
|
||||
condition: service_healthy
|
||||
network_mode: "host"
|
||||
image: ${TPOT_REPO}/p0f:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
read_only: true
|
||||
volumes:
|
||||
- ${TPOT_DATA_PATH}/p0f/log:/var/log/p0f
|
||||
|
||||
# Suricata service
|
||||
suricata:
|
||||
container_name: suricata
|
||||
restart: always
|
||||
depends_on:
|
||||
tpotinit:
|
||||
condition: service_healthy
|
||||
environment:
|
||||
- OINKCODE=${OINKCODE:-OPEN} # Default to OPEN if unset or NULL (value provided by T-Pot .env)
|
||||
# Loading external Rules from URL
|
||||
# - FROMURL="https://username:password@yoururl.com|https://username:password@otherurl.com"
|
||||
network_mode: "host"
|
||||
cap_add:
|
||||
- NET_ADMIN
|
||||
- SYS_NICE
|
||||
- NET_RAW
|
||||
image: ${TPOT_REPO}/suricata:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
volumes:
|
||||
- ${TPOT_DATA_PATH}/suricata/log:/var/log/suricata
|
||||
|
||||
|
||||
##################
|
||||
#### Tools
|
||||
##################
|
||||
|
||||
#### ELK
|
||||
## Elasticsearch service
|
||||
elasticsearch:
|
||||
container_name: elasticsearch
|
||||
restart: always
|
||||
depends_on:
|
||||
tpotinit:
|
||||
condition: service_healthy
|
||||
networks:
|
||||
- nginx_local
|
||||
environment:
|
||||
- bootstrap.memory_lock=true
|
||||
- ES_JAVA_OPTS=-Xms2048m -Xmx2048m
|
||||
- ES_TMPDIR=/tmp
|
||||
cap_add:
|
||||
- IPC_LOCK
|
||||
ulimits:
|
||||
memlock:
|
||||
soft: -1
|
||||
hard: -1
|
||||
nofile:
|
||||
soft: 65536
|
||||
hard: 65536
|
||||
mem_limit: 4g
|
||||
ports:
|
||||
- "127.0.0.1:64298:9200"
|
||||
image: ${TPOT_REPO}/elasticsearch:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
volumes:
|
||||
- ${TPOT_DATA_PATH}:/data
|
||||
|
||||
## Kibana service
|
||||
kibana:
|
||||
container_name: kibana
|
||||
restart: always
|
||||
depends_on:
|
||||
elasticsearch:
|
||||
condition: service_healthy
|
||||
networks:
|
||||
- nginx_local
|
||||
mem_limit: 1g
|
||||
ports:
|
||||
- "127.0.0.1:64296:5601"
|
||||
image: ${TPOT_REPO}/kibana:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
|
||||
## Logstash service
|
||||
logstash:
|
||||
container_name: logstash
|
||||
restart: always
|
||||
depends_on:
|
||||
elasticsearch:
|
||||
condition: service_healthy
|
||||
networks:
|
||||
- nginx_local
|
||||
environment:
|
||||
- LS_JAVA_OPTS=-Xms1024m -Xmx1024m
|
||||
- TPOT_TYPE=${TPOT_TYPE:-HIVE}
|
||||
- TPOT_HIVE_USER=${TPOT_HIVE_USER}
|
||||
- TPOT_HIVE_IP=${TPOT_HIVE_IP}
|
||||
- LS_SSL_VERIFICATION=${LS_SSL_VERIFICATION:-full}
|
||||
ports:
|
||||
- "127.0.0.1:64305:64305"
|
||||
mem_limit: 2g
|
||||
image: ${TPOT_REPO}/logstash:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
volumes:
|
||||
- ${TPOT_DATA_PATH}:/data
|
||||
|
||||
## Map Redis Service
|
||||
map_redis:
|
||||
container_name: map_redis
|
||||
restart: always
|
||||
depends_on:
|
||||
tpotinit:
|
||||
condition: service_healthy
|
||||
networks:
|
||||
- nginx_local
|
||||
stop_signal: SIGKILL
|
||||
tty: true
|
||||
image: ${TPOT_REPO}/redis:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
read_only: true
|
||||
|
||||
## Map Web Service
|
||||
map_web:
|
||||
container_name: map_web
|
||||
restart: always
|
||||
depends_on:
|
||||
tpotinit:
|
||||
condition: service_healthy
|
||||
networks:
|
||||
- nginx_local
|
||||
environment:
|
||||
- MAP_COMMAND=AttackMapServer.py
|
||||
stop_signal: SIGKILL
|
||||
tty: true
|
||||
ports:
|
||||
- "127.0.0.1:64299:64299"
|
||||
image: ${TPOT_REPO}/map:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
|
||||
## Map Data Service
|
||||
map_data:
|
||||
container_name: map_data
|
||||
restart: always
|
||||
depends_on:
|
||||
elasticsearch:
|
||||
condition: service_healthy
|
||||
networks:
|
||||
- nginx_local
|
||||
environment:
|
||||
- MAP_COMMAND=DataServer_v2.py
|
||||
- TPOT_ATTACKMAP_TEXT=${TPOT_ATTACKMAP_TEXT}
|
||||
- TZ=${TPOT_ATTACKMAP_TEXT_TIMEZONE}
|
||||
stop_signal: SIGKILL
|
||||
tty: true
|
||||
image: ${TPOT_REPO}/map:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
#### /ELK
|
||||
|
||||
# Ewsposter service
|
||||
ewsposter:
|
||||
container_name: ewsposter
|
||||
restart: always
|
||||
depends_on:
|
||||
tpotinit:
|
||||
condition: service_healthy
|
||||
networks:
|
||||
- ewsposter_local
|
||||
environment:
|
||||
- EWS_HPFEEDS_ENABLE=false
|
||||
- EWS_HPFEEDS_HOST=host
|
||||
- EWS_HPFEEDS_PORT=port
|
||||
- EWS_HPFEEDS_CHANNELS=channels
|
||||
- EWS_HPFEEDS_IDENT=user
|
||||
- EWS_HPFEEDS_SECRET=secret
|
||||
- EWS_HPFEEDS_TLSCERT=false
|
||||
- EWS_HPFEEDS_FORMAT=json
|
||||
image: ${TPOT_REPO}/ewsposter:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
volumes:
|
||||
- ${TPOT_DATA_PATH}:/data
|
||||
- ${TPOT_DATA_PATH}/ews/conf/ews.ip:/opt/ewsposter/ews.ip
|
||||
|
||||
# Nginx service
|
||||
nginx:
|
||||
container_name: nginx
|
||||
restart: always
|
||||
environment:
|
||||
- TPOT_OSTYPE=${TPOT_OSTYPE}
|
||||
depends_on:
|
||||
tpotinit:
|
||||
condition: service_healthy
|
||||
tmpfs:
|
||||
- /var/tmp/nginx/client_body
|
||||
- /var/tmp/nginx/proxy
|
||||
- /var/tmp/nginx/fastcgi
|
||||
- /var/tmp/nginx/uwsgi
|
||||
- /var/tmp/nginx/scgi
|
||||
- /run
|
||||
- /var/lib/nginx/tmp:uid=100,gid=82
|
||||
networks:
|
||||
- nginx_local
|
||||
ports:
|
||||
- "64297:64297"
|
||||
- "64294:64294"
|
||||
image: ${TPOT_REPO}/nginx:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
read_only: true
|
||||
volumes:
|
||||
- ${TPOT_DATA_PATH}/nginx/cert/:/etc/nginx/cert/:ro
|
||||
- ${TPOT_DATA_PATH}/nginx/conf/nginxpasswd:/etc/nginx/nginxpasswd:ro
|
||||
- ${TPOT_DATA_PATH}/nginx/conf/lswebpasswd:/etc/nginx/lswebpasswd:ro
|
||||
- ${TPOT_DATA_PATH}/nginx/log/:/var/log/nginx/
|
||||
|
||||
# Spiderfoot service
|
||||
spiderfoot:
|
||||
container_name: spiderfoot
|
||||
restart: always
|
||||
depends_on:
|
||||
tpotinit:
|
||||
condition: service_healthy
|
||||
networks:
|
||||
- nginx_local
|
||||
ports:
|
||||
- "127.0.0.1:64303:8080"
|
||||
image: ${TPOT_REPO}/spiderfoot:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
volumes:
|
||||
- ${TPOT_DATA_PATH}/spiderfoot:/home/spiderfoot/.spiderfoot
|
||||
|
|
@@ -1,406 +0,0 @@
|
|||
# T-Pot: TARPIT
|
||||
networks:
|
||||
ddospot_local:
|
||||
endlessh_local:
|
||||
go-pot_local:
|
||||
hellpot_local:
|
||||
heralding_local:
|
||||
nginx_local:
|
||||
ewsposter_local:
|
||||
|
||||
services:
|
||||
|
||||
#########################################
|
||||
#### DEV
|
||||
#########################################
|
||||
#### T-Pot Init - Never delete this!
|
||||
#########################################
|
||||
|
||||
# T-Pot Init Service
|
||||
tpotinit:
|
||||
container_name: tpotinit
|
||||
env_file:
|
||||
- .env
|
||||
restart: always
|
||||
stop_grace_period: 60s
|
||||
tmpfs:
|
||||
- /tmp/etc:uid=2000,gid=2000
|
||||
- /tmp/:uid=2000,gid=2000
|
||||
network_mode: "host"
|
||||
cap_add:
|
||||
- NET_ADMIN
|
||||
image: ${TPOT_REPO}/tpotinit:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
volumes:
|
||||
- ${TPOT_DOCKER_COMPOSE}:/tmp/tpot/docker-compose.yml:ro
|
||||
- ${TPOT_DATA_PATH}/blackhole:/etc/blackhole
|
||||
- ${TPOT_DATA_PATH}:/data
|
||||
- /var/run/docker.sock:/var/run/docker.sock:ro
|
||||
|
||||
|
||||
##################
|
||||
#### Honeypots
|
||||
##################
|
||||
|
||||
# Ddospot service
|
||||
ddospot:
|
||||
container_name: ddospot
|
||||
restart: always
|
||||
depends_on:
|
||||
tpotinit:
|
||||
condition: service_healthy
|
||||
networks:
|
||||
- ddospot_local
|
||||
ports:
|
||||
- "19:19/udp"
|
||||
- "53:53/udp"
|
||||
- "123:123/udp"
|
||||
# - "161:161/udp"
|
||||
- "1900:1900/udp"
|
||||
image: ${TPOT_REPO}/ddospot:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
read_only: true
|
||||
volumes:
|
||||
- ${TPOT_DATA_PATH}/ddospot/log:/opt/ddospot/ddospot/logs
|
||||
- ${TPOT_DATA_PATH}/ddospot/bl:/opt/ddospot/ddospot/bl
|
||||
- ${TPOT_DATA_PATH}/ddospot/db:/opt/ddospot/ddospot/db
|
||||
|
||||
# Endlessh service
|
||||
endlessh:
|
||||
container_name: endlessh
|
||||
restart: always
|
||||
depends_on:
|
||||
tpotinit:
|
||||
condition: service_healthy
|
||||
networks:
|
||||
- endlessh_local
|
||||
ports:
|
||||
- "22:2222"
|
||||
image: ${TPOT_REPO}/endlessh:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
read_only: true
|
||||
volumes:
|
||||
- ${TPOT_DATA_PATH}/endlessh/log:/var/log/endlessh
|
||||
|
||||
# Go-pot service
|
||||
go-pot:
|
||||
container_name: go-pot
|
||||
restart: always
|
||||
depends_on:
|
||||
tpotinit:
|
||||
condition: service_healthy
|
||||
networks:
|
||||
- go-pot_local
|
||||
ports:
|
||||
- "8080:8080"
|
||||
image: ${TPOT_REPO}/go-pot:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
read_only: true
|
||||
volumes:
|
||||
- ${TPOT_DATA_PATH}/go-pot/log:/opt/go-pot/log/
|
||||
|
||||
# Hellpot service
|
||||
hellpot:
|
||||
container_name: hellpot
|
||||
restart: always
|
||||
depends_on:
|
||||
tpotinit:
|
||||
condition: service_healthy
|
||||
networks:
|
||||
- hellpot_local
|
||||
ports:
|
||||
- "80:8080"
|
||||
image: ${TPOT_REPO}/hellpot:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
read_only: true
|
||||
volumes:
|
||||
- ${TPOT_DATA_PATH}/hellpot/log:/var/log/hellpot
|
||||
|
||||
# Heralding service
|
||||
heralding:
|
||||
container_name: heralding
|
||||
restart: always
|
||||
depends_on:
|
||||
tpotinit:
|
||||
condition: service_healthy
|
||||
tmpfs:
|
||||
- /tmp/heralding:uid=2000,gid=2000
|
||||
networks:
|
||||
- heralding_local
|
||||
ports:
|
||||
- "21:21"
|
||||
# - "22:22"
|
||||
- "23:23"
|
||||
- "25:25"
|
||||
# - "80:80"
|
||||
- "110:110"
|
||||
- "143:143"
|
||||
- "443:443"
|
||||
- "465:465"
|
||||
- "993:993"
|
||||
- "995:995"
|
||||
- "3306:3306"
|
||||
- "3389:3389"
|
||||
- "1080:1080"
|
||||
- "5432:5432"
|
||||
- "5900:5900"
|
||||
image: ${TPOT_REPO}/heralding:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
read_only: true
|
||||
volumes:
|
||||
- ${TPOT_DATA_PATH}/heralding/log:/var/log/heralding
|
||||
|
||||
|
||||
|
||||
##################
|
||||
#### NSM
|
||||
##################
|
||||
|
||||
# Fatt service
|
||||
fatt:
|
||||
container_name: fatt
|
||||
restart: always
|
||||
depends_on:
|
||||
tpotinit:
|
||||
condition: service_healthy
|
||||
network_mode: "host"
|
||||
cap_add:
|
||||
- NET_ADMIN
|
||||
- SYS_NICE
|
||||
- NET_RAW
|
||||
image: ${TPOT_REPO}/fatt:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
volumes:
|
||||
- ${TPOT_DATA_PATH}/fatt/log:/opt/fatt/log
|
||||
|
||||
# P0f service
|
||||
p0f:
|
||||
container_name: p0f
|
||||
restart: always
|
||||
depends_on:
|
||||
tpotinit:
|
||||
condition: service_healthy
|
||||
network_mode: "host"
|
||||
image: ${TPOT_REPO}/p0f:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
read_only: true
|
||||
volumes:
|
||||
- ${TPOT_DATA_PATH}/p0f/log:/var/log/p0f
|
||||
|
||||
# Suricata service
|
||||
suricata:
|
||||
container_name: suricata
|
||||
restart: always
|
||||
depends_on:
|
||||
tpotinit:
|
||||
condition: service_healthy
|
||||
environment:
|
||||
- OINKCODE=${OINKCODE:-OPEN} # Default to OPEN if unset or NULL (value provided by T-Pot .env)
|
||||
# Loading external Rules from URL
|
||||
# - FROMURL="https://username:password@yoururl.com|https://username:password@otherurl.com"
|
||||
network_mode: "host"
|
||||
cap_add:
|
||||
- NET_ADMIN
|
||||
- SYS_NICE
|
||||
- NET_RAW
|
||||
image: ${TPOT_REPO}/suricata:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
volumes:
|
||||
- ${TPOT_DATA_PATH}/suricata/log:/var/log/suricata
|
||||
|
||||
|
||||
##################
|
||||
#### Tools
|
||||
##################
|
||||
|
||||
#### ELK
|
||||
## Elasticsearch service
|
||||
elasticsearch:
|
||||
container_name: elasticsearch
|
||||
restart: always
|
||||
depends_on:
|
||||
tpotinit:
|
||||
condition: service_healthy
|
||||
networks:
|
||||
- nginx_local
|
||||
environment:
|
||||
- bootstrap.memory_lock=true
|
||||
- ES_JAVA_OPTS=-Xms2048m -Xmx2048m
|
||||
- ES_TMPDIR=/tmp
|
||||
cap_add:
|
||||
- IPC_LOCK
|
||||
ulimits:
|
||||
memlock:
|
||||
soft: -1
|
||||
hard: -1
|
||||
nofile:
|
||||
soft: 65536
|
||||
hard: 65536
|
||||
mem_limit: 4g
|
||||
ports:
|
||||
- "127.0.0.1:64298:9200"
|
||||
image: ${TPOT_REPO}/elasticsearch:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
volumes:
|
||||
- ${TPOT_DATA_PATH}:/data
|
||||
|
||||
## Kibana service
|
||||
kibana:
|
||||
container_name: kibana
|
||||
restart: always
|
||||
depends_on:
|
||||
elasticsearch:
|
||||
condition: service_healthy
|
||||
networks:
|
||||
- nginx_local
|
||||
mem_limit: 1g
|
||||
ports:
|
||||
- "127.0.0.1:64296:5601"
|
||||
image: ${TPOT_REPO}/kibana:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
|
||||
## Logstash service
|
||||
logstash:
|
||||
container_name: logstash
|
||||
restart: always
|
||||
depends_on:
|
||||
elasticsearch:
|
||||
condition: service_healthy
|
||||
networks:
|
||||
- nginx_local
|
||||
environment:
|
||||
- LS_JAVA_OPTS=-Xms1024m -Xmx1024m
|
||||
- TPOT_TYPE=${TPOT_TYPE:-HIVE}
|
||||
- TPOT_HIVE_USER=${TPOT_HIVE_USER}
|
||||
- TPOT_HIVE_IP=${TPOT_HIVE_IP}
|
||||
- LS_SSL_VERIFICATION=${LS_SSL_VERIFICATION:-full}
|
||||
ports:
|
||||
- "127.0.0.1:64305:64305"
|
||||
mem_limit: 2g
|
||||
image: ${TPOT_REPO}/logstash:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
volumes:
|
||||
- ${TPOT_DATA_PATH}:/data
|
||||
|
||||
## Map Redis Service
|
||||
map_redis:
|
||||
container_name: map_redis
|
||||
restart: always
|
||||
depends_on:
|
||||
tpotinit:
|
||||
condition: service_healthy
|
||||
networks:
|
||||
- nginx_local
|
||||
stop_signal: SIGKILL
|
||||
tty: true
|
||||
image: ${TPOT_REPO}/redis:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
read_only: true
|
||||
|
||||
## Map Web Service
|
||||
map_web:
|
||||
container_name: map_web
|
||||
restart: always
|
||||
depends_on:
|
||||
tpotinit:
|
||||
condition: service_healthy
|
||||
networks:
|
||||
- nginx_local
|
||||
environment:
|
||||
- MAP_COMMAND=AttackMapServer.py
|
||||
stop_signal: SIGKILL
|
||||
tty: true
|
||||
ports:
|
||||
- "127.0.0.1:64299:64299"
|
||||
image: ${TPOT_REPO}/map:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
|
||||
## Map Data Service
|
||||
map_data:
|
||||
container_name: map_data
|
||||
restart: always
|
||||
depends_on:
|
||||
elasticsearch:
|
||||
condition: service_healthy
|
||||
networks:
|
||||
- nginx_local
|
||||
environment:
|
||||
- MAP_COMMAND=DataServer_v2.py
|
||||
- TPOT_ATTACKMAP_TEXT=${TPOT_ATTACKMAP_TEXT}
|
||||
- TZ=${TPOT_ATTACKMAP_TEXT_TIMEZONE}
|
||||
stop_signal: SIGKILL
|
||||
tty: true
|
||||
image: ${TPOT_REPO}/map:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
#### /ELK
|
||||
|
||||
# Ewsposter service
|
||||
ewsposter:
|
||||
container_name: ewsposter
|
||||
restart: always
|
||||
depends_on:
|
||||
tpotinit:
|
||||
condition: service_healthy
|
||||
networks:
|
||||
- ewsposter_local
|
||||
environment:
|
||||
- EWS_HPFEEDS_ENABLE=false
|
||||
- EWS_HPFEEDS_HOST=host
|
||||
- EWS_HPFEEDS_PORT=port
|
||||
- EWS_HPFEEDS_CHANNELS=channels
|
||||
- EWS_HPFEEDS_IDENT=user
|
||||
- EWS_HPFEEDS_SECRET=secret
|
||||
- EWS_HPFEEDS_TLSCERT=false
|
||||
- EWS_HPFEEDS_FORMAT=json
|
||||
image: ${TPOT_REPO}/ewsposter:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
volumes:
|
||||
- ${TPOT_DATA_PATH}:/data
|
||||
- ${TPOT_DATA_PATH}/ews/conf/ews.ip:/opt/ewsposter/ews.ip
|
||||
|
||||
# Nginx service
|
||||
nginx:
|
||||
container_name: nginx
|
||||
restart: always
|
||||
environment:
|
||||
- TPOT_OSTYPE=${TPOT_OSTYPE}
|
||||
depends_on:
|
||||
tpotinit:
|
||||
condition: service_healthy
|
||||
tmpfs:
|
||||
- /var/tmp/nginx/client_body
|
||||
- /var/tmp/nginx/proxy
|
||||
- /var/tmp/nginx/fastcgi
|
||||
- /var/tmp/nginx/uwsgi
|
||||
- /var/tmp/nginx/scgi
|
||||
- /run
|
||||
- /var/lib/nginx/tmp:uid=100,gid=82
|
||||
networks:
|
||||
- nginx_local
|
||||
ports:
|
||||
- "64297:64297"
|
||||
- "64294:64294"
|
||||
image: ${TPOT_REPO}/nginx:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
read_only: true
|
||||
volumes:
|
||||
- ${TPOT_DATA_PATH}/nginx/cert/:/etc/nginx/cert/:ro
|
||||
- ${TPOT_DATA_PATH}/nginx/conf/nginxpasswd:/etc/nginx/nginxpasswd:ro
|
||||
- ${TPOT_DATA_PATH}/nginx/conf/lswebpasswd:/etc/nginx/lswebpasswd:ro
|
||||
- ${TPOT_DATA_PATH}/nginx/log/:/var/log/nginx/
|
||||
|
||||
# Spiderfoot service
|
||||
spiderfoot:
|
||||
container_name: spiderfoot
|
||||
restart: always
|
||||
depends_on:
|
||||
tpotinit:
|
||||
condition: service_healthy
|
||||
networks:
|
||||
- nginx_local
|
||||
ports:
|
||||
- "127.0.0.1:64303:8080"
|
||||
image: ${TPOT_REPO}/spiderfoot:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
volumes:
|
||||
- ${TPOT_DATA_PATH}/spiderfoot:/home/spiderfoot/.spiderfoot
|
||||
153	deploy.sh
@@ -1,153 +0,0 @@
|
|||
#!/usr/bin/env bash
|
||||
|
||||
myANSIBLE_PORT=64295
|
||||
myANSIBLE_TPOT_PLAYBOOK="installer/install/deploy.yml"
|
||||
myADJECTIVE=$(shuf -n1 installer/install/a.txt)
|
||||
myNOUN=$(shuf -n1 installer/install/n.txt)
|
||||
myENV_FILE="$HOME/tpotce/.env"
|
||||
|
||||
myDEPLOY=$(cat << "EOF"

 ____  [ T-Pot ]                  ____             _             
/ ___|  ___ _ __  ___  ___  _ __ |  _ \  ___ _ __ | | ___  _   _ 
\___ \ / _ \ '_ \/ __|/ _ \| '__|| | | |/ _ \ '_ \| |/ _ \| | | |
 ___) |  __/ | | \__ \ (_) | |   | |_| |  __/ |_) | | (_) | |_| |
|____/ \___|_| |_|___/\___/|_|   |____/ \___| .__/|_|\___/ \__, |
                                             |_|            |___/ 

EOF
)
|
||||
|
||||
# Check if the script is running in a HIVE installation
|
||||
if ! grep -q 'TPOT_TYPE=HIVE' "$HOME/tpotce/.env";
|
||||
then
|
||||
echo "# This script is only supported on HIVE installations."
|
||||
echo
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Check if running on a supported distribution
|
||||
mySUPPORTED_DISTRIBUTIONS=("AlmaLinux" "Debian GNU/Linux" "Fedora Linux" "openSUSE Tumbleweed" "Raspbian GNU/Linux" "Rocky Linux" "Ubuntu")
|
||||
myCURRENT_DISTRIBUTION=$(awk -F= '/^NAME/{print $2}' /etc/os-release | tr -d '"')
|
||||
|
||||
if [[ ! " ${mySUPPORTED_DISTRIBUTIONS[@]} " =~ " ${myCURRENT_DISTRIBUTION} " ]];
|
||||
then
|
||||
echo "# Only the following distributions are supported: AlmaLinux, Fedora, Debian, openSUSE Tumbleweed, Rocky Linux and Ubuntu."
|
||||
echo
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "${myDEPLOY}"
|
||||
echo
|
||||
echo "# This script will prepare a T-Pot SENSOR installation to transmit logs into this HIVE."
|
||||
echo
|
||||
|
||||
# Ask if a T-Pot SENSOR was installed
|
||||
read -p "# Was a T-Pot SENSOR installed? (y/n): " mySENSOR_INSTALLED
|
||||
if [[ ${mySENSOR_INSTALLED} != "y" ]];
|
||||
then
|
||||
echo "# A T-Pot SENSOR must be installed to continue."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Ask for the remote user
|
||||
read -p "# Enter the remote username T-Pot SENSOR was installed with: " mySSHUSER
|
||||
if [[ ${mySSHUSER} == "" ]];
|
||||
then
|
||||
echo "# You need to enter a user. Aborting."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Validate IP/domain name loop
|
||||
while true; do
|
||||
read -p "# Enter the IP/domain name of the SENSOR: " mySENSOR_IP
|
||||
if [[ ${mySENSOR_IP} =~ ^([a-zA-Z0-9]+(\.[a-zA-Z0-9]+)*\.[a-zA-Z]{2,})|(([0-9]{1,3}\.){3}[0-9]{1,3})$ ]];
|
||||
then
|
||||
break
|
||||
else
|
||||
echo "# Invalid IP/domain. Please enter a valid IP or domain name."
|
||||
fi
|
||||
done
|
||||
|
||||
# Check if ssh key has been deployed
|
||||
read -p "# Has a SSH key been deployed to the SENSOR? (y/n): " mySSHKEY_DEPLOYED
|
||||
if [[ ${mySSHKEY_DEPLOYED} != "y" ]];
|
||||
then
|
||||
echo "# Generate a SSH key using 'ssh-keygen' and deploy it to the SENSOR (Example: ssh-copy-id -p 64295 ${mySSHUSER}@${mySENSOR_IP})."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Validate IP/domain name of HIVE
|
||||
while true; do
|
||||
read -p "# Enter the IP/domain name of this HIVE: " myTPOT_HIVE_IP
|
||||
if [[ ${myTPOT_HIVE_IP} =~ ^([a-zA-Z0-9]+(\.[a-zA-Z0-9]+)*\.[a-zA-Z]{2,})|(([0-9]{1,3}\.){3}[0-9]{1,3})$ ]];
|
||||
then
|
||||
break
|
||||
else
|
||||
echo "# Invalid IP/domain. Please enter a valid IP or domain name."
|
||||
fi
|
||||
done
|
||||
|
||||
# Create a random SENSOR user name that is easily readable
|
||||
myLS_WEB_USER="sensor-${myADJECTIVE}-${myNOUN}"
|
||||
|
||||
# Create a random password
|
||||
myLS_WEB_PW=$(tr -dc 'a-zA-Z0-9' < /dev/urandom | fold -w 32 | head -n 1)
|
||||
|
||||
# Create myLS_WEB_USER_ENC
|
||||
myLS_WEB_USER_ENC=$(htpasswd -b -n "${myLS_WEB_USER}" "${myLS_WEB_PW}")
|
||||
myLS_WEB_USER_ENC_B64=$(echo -n "${myLS_WEB_USER_ENC}" | base64 -w0)
|
||||
|
||||
# Create myTPOT_HIVE_USER; since this is used by Logstash on the SENSOR, it needs to be base64 encoded directly
|
||||
myTPOT_HIVE_USER=$(echo -n "${myLS_WEB_USER}:${myLS_WEB_PW}" | base64 -w0)
|
||||
|
||||
# Print credentials
|
||||
echo "# The following SENSOR credentials have been created:"
|
||||
echo "# New SENSOR username: ${myLS_WEB_USER}"
|
||||
echo "# New SENSOR passowrd: ${myLS_WEB_PW}"
|
||||
echo "# New htpasswd encoded credentials: ${myLS_WEB_USER_ENC}"
|
||||
echo "# New htpasswd credentials base64 encoded: ${myLS_WEB_USER_ENC_B64}"
|
||||
echo "# New SENSOR credentials base64 encoded: ${myTPOT_HIVE_USER}"
|
||||
echo
|
||||
echo "# Ansible will ask for the ‘BECOME password‘ which is typically the password you ’sudo’ with on the SENSOR."
|
||||
echo "# The password will allow Ansible to run a reboot via sudo on the SENSOR."
|
||||
echo
|
||||
|
||||
# Read LS_WEB_USER from file
|
||||
myENV_LS_WEB_USER=$(grep "^LS_WEB_USER=" "${myENV_FILE}" | sed 's/^LS_WEB_USER=//g' | tr -d "\"'")
|
||||
|
||||
# Add the new SENSOR user
|
||||
if [ "${myENV_LS_WEB_USER}" == "" ];
|
||||
then
|
||||
myENV_LS_WEB_USER="${myLS_WEB_USER_ENC_B64}"
|
||||
else
|
||||
myENV_LS_WEB_USER="${myENV_LS_WEB_USER} ${myLS_WEB_USER_ENC_B64}"
|
||||
fi
|
||||
|
||||
# Need to export for Ansible
|
||||
export myTPOT_HIVE_USER
|
||||
export myTPOT_HIVE_IP
|
||||
|
||||
ANSIBLE_LOG_PATH=${HOME}/tpotce/data/deploy_sensor.log ansible-playbook ${myANSIBLE_TPOT_PLAYBOOK} -i ${mySENSOR_IP}, -c ssh -u ${mySSHUSER} --ask-become-pass -e "ansible_port=${myANSIBLE_PORT}"
|
||||
|
||||
if [ "$?" == 0 ];
|
||||
then
|
||||
# Update the T-Pot .env config and lswebpasswd on the host (avoids the need to restart T-Pot)
|
||||
echo "# Updating SENSOR users on this HIVE and in the T-Pot .env config:"
|
||||
sed -i "/^LS_WEB_USER=/c\LS_WEB_USER=$myENV_LS_WEB_USER" "${myENV_FILE}"
|
||||
: > "${HOME}"/tpotce/data/nginx/conf/lswebpasswd
|
||||
for i in $myENV_LS_WEB_USER;
|
||||
do
|
||||
if [[ -n $i ]];
|
||||
then
|
||||
# Need to control newlines as they kept coming up for some reason
|
||||
echo -n "$i" | base64 -d -w0
|
||||
echo
|
||||
echo -n "$i" | base64 -d -w0 | tr -d '\n' >> ${HOME}/tpotce/data/nginx/conf/lswebpasswd
|
||||
echo >> ${HOME}/tpotce/data/nginx/conf/lswebpasswd
|
||||
fi
|
||||
done
|
||||
fi
|
||||
|
||||
unset myTPOT_HIVE_USER
|
||||
unset myTPOT_HIVE_IP
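Put together, deploy.sh is meant to be run interactively on the HIVE after the SENSOR has been installed and an SSH key copied over; it then drives the Ansible playbook against the SENSOR on port 64295 and appends the new Logstash web user to the HIVE's .env and lswebpasswd. A minimal usage sketch (the user name and host are placeholders):

# On the HIVE, as the user owning ~/tpotce
ssh-keygen                                            # if no key exists yet
ssh-copy-id -p 64295 sensoruser@sensor.example.org    # deploy the key to the SENSOR first
cd ~/tpotce
./deploy.sh                                           # answer the prompts; Ansible asks for the BECOME password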
|
||||
216	doc/Makefile (new file)
@@ -0,0 +1,216 @@
|
|||
# Makefile for Sphinx documentation
|
||||
#
|
||||
|
||||
# You can set these variables from the command line.
|
||||
SPHINXOPTS =
|
||||
SPHINXBUILD = sphinx-build
|
||||
PAPER =
|
||||
BUILDDIR = build
|
||||
|
||||
# User-friendly check for sphinx-build
|
||||
ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1)
|
||||
$(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/)
|
||||
endif
|
||||
|
||||
# Internal variables.
|
||||
PAPEROPT_a4 = -D latex_paper_size=a4
|
||||
PAPEROPT_letter = -D latex_paper_size=letter
|
||||
ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source
|
||||
# the i18n builder cannot share the environment and doctrees with the others
|
||||
I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source
|
||||
|
||||
.PHONY: help
|
||||
help:
|
||||
@echo "Please use \`make <target>' where <target> is one of"
|
||||
@echo " html to make standalone HTML files"
|
||||
@echo " dirhtml to make HTML files named index.html in directories"
|
||||
@echo " singlehtml to make a single large HTML file"
|
||||
@echo " pickle to make pickle files"
|
||||
@echo " json to make JSON files"
|
||||
@echo " htmlhelp to make HTML files and a HTML help project"
|
||||
@echo " qthelp to make HTML files and a qthelp project"
|
||||
@echo " applehelp to make an Apple Help Book"
|
||||
@echo " devhelp to make HTML files and a Devhelp project"
|
||||
@echo " epub to make an epub"
|
||||
@echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
|
||||
@echo " latexpdf to make LaTeX files and run them through pdflatex"
|
||||
@echo " latexpdfja to make LaTeX files and run them through platex/dvipdfmx"
|
||||
@echo " text to make text files"
|
||||
@echo " man to make manual pages"
|
||||
@echo " texinfo to make Texinfo files"
|
||||
@echo " info to make Texinfo files and run them through makeinfo"
|
||||
@echo " gettext to make PO message catalogs"
|
||||
@echo " changes to make an overview of all changed/added/deprecated items"
|
||||
@echo " xml to make Docutils-native XML files"
|
||||
@echo " pseudoxml to make pseudoxml-XML files for display purposes"
|
||||
@echo " linkcheck to check all external links for integrity"
|
||||
@echo " doctest to run all doctests embedded in the documentation (if enabled)"
|
||||
@echo " coverage to run coverage check of the documentation (if enabled)"
|
||||
|
||||
.PHONY: clean
|
||||
clean:
|
||||
rm -rf $(BUILDDIR)/*
|
||||
|
||||
.PHONY: html
|
||||
html:
|
||||
$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
|
||||
@echo
|
||||
@echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
|
||||
|
||||
.PHONY: dirhtml
|
||||
dirhtml:
|
||||
$(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
|
||||
@echo
|
||||
@echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."
|
||||
|
||||
.PHONY: singlehtml
|
||||
singlehtml:
|
||||
$(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
|
||||
@echo
|
||||
@echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."
|
||||
|
||||
.PHONY: pickle
|
||||
pickle:
|
||||
$(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
|
||||
@echo
|
||||
@echo "Build finished; now you can process the pickle files."
|
||||
|
||||
.PHONY: json
|
||||
json:
|
||||
$(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
|
||||
@echo
|
||||
@echo "Build finished; now you can process the JSON files."
|
||||
|
||||
.PHONY: htmlhelp
|
||||
htmlhelp:
|
||||
$(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
|
||||
@echo
|
||||
@echo "Build finished; now you can run HTML Help Workshop with the" \
|
||||
".hhp project file in $(BUILDDIR)/htmlhelp."
|
||||
|
||||
.PHONY: qthelp
|
||||
qthelp:
|
||||
$(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
|
||||
@echo
|
||||
@echo "Build finished; now you can run "qcollectiongenerator" with the" \
|
||||
".qhcp project file in $(BUILDDIR)/qthelp, like this:"
|
||||
@echo "# qcollectiongenerator $(BUILDDIR)/qthelp/T-Pot.qhcp"
|
||||
@echo "To view the help file:"
|
||||
@echo "# assistant -collectionFile $(BUILDDIR)/qthelp/T-Pot.qhc"
|
||||
|
||||
.PHONY: applehelp
|
||||
applehelp:
|
||||
$(SPHINXBUILD) -b applehelp $(ALLSPHINXOPTS) $(BUILDDIR)/applehelp
|
||||
@echo
|
||||
@echo "Build finished. The help book is in $(BUILDDIR)/applehelp."
|
||||
@echo "N.B. You won't be able to view it unless you put it in" \
|
||||
"~/Library/Documentation/Help or install it in your application" \
|
||||
"bundle."
|
||||
|
||||
.PHONY: devhelp
|
||||
devhelp:
|
||||
$(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp
|
||||
@echo
|
||||
@echo "Build finished."
|
||||
@echo "To view the help file:"
|
||||
@echo "# mkdir -p $$HOME/.local/share/devhelp/T-Pot"
|
||||
@echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/T-Pot"
|
||||
@echo "# devhelp"
|
||||
|
||||
.PHONY: epub
|
||||
epub:
|
||||
$(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub
|
||||
@echo
|
||||
@echo "Build finished. The epub file is in $(BUILDDIR)/epub."
|
||||
|
||||
.PHONY: latex
|
||||
latex:
|
||||
$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
|
||||
@echo
|
||||
@echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
|
||||
@echo "Run \`make' in that directory to run these through (pdf)latex" \
|
||||
"(use \`make latexpdf' here to do that automatically)."
|
||||
|
||||
.PHONY: latexpdf
|
||||
latexpdf:
|
||||
$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
|
||||
@echo "Running LaTeX files through pdflatex..."
|
||||
$(MAKE) -C $(BUILDDIR)/latex all-pdf
|
||||
@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
|
||||
|
||||
.PHONY: latexpdfja
|
||||
latexpdfja:
|
||||
$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
|
||||
@echo "Running LaTeX files through platex and dvipdfmx..."
|
||||
$(MAKE) -C $(BUILDDIR)/latex all-pdf-ja
|
||||
@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
|
||||
|
||||
.PHONY: text
|
||||
text:
|
||||
$(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text
|
||||
@echo
|
||||
@echo "Build finished. The text files are in $(BUILDDIR)/text."
|
||||
|
||||
.PHONY: man
|
||||
man:
|
||||
$(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man
|
||||
@echo
|
||||
@echo "Build finished. The manual pages are in $(BUILDDIR)/man."
|
||||
|
||||
.PHONY: texinfo
|
||||
texinfo:
|
||||
$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
|
||||
@echo
|
||||
@echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo."
|
||||
@echo "Run \`make' in that directory to run these through makeinfo" \
|
||||
"(use \`make info' here to do that automatically)."
|
||||
|
||||
.PHONY: info
|
||||
info:
|
||||
$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
|
||||
@echo "Running Texinfo files through makeinfo..."
|
||||
make -C $(BUILDDIR)/texinfo info
|
||||
@echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo."
|
||||
|
||||
.PHONY: gettext
|
||||
gettext:
|
||||
$(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale
|
||||
@echo
|
||||
@echo "Build finished. The message catalogs are in $(BUILDDIR)/locale."
|
||||
|
||||
.PHONY: changes
|
||||
changes:
|
||||
$(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
|
||||
@echo
|
||||
@echo "The overview file is in $(BUILDDIR)/changes."
|
||||
|
||||
.PHONY: linkcheck
|
||||
linkcheck:
|
||||
$(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
|
||||
@echo
|
||||
@echo "Link check complete; look for any errors in the above output " \
|
||||
"or in $(BUILDDIR)/linkcheck/output.txt."
|
||||
|
||||
.PHONY: doctest
|
||||
doctest:
|
||||
$(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
|
||||
@echo "Testing of doctests in the sources finished, look at the " \
|
||||
"results in $(BUILDDIR)/doctest/output.txt."
|
||||
|
||||
.PHONY: coverage
|
||||
coverage:
|
||||
$(SPHINXBUILD) -b coverage $(ALLSPHINXOPTS) $(BUILDDIR)/coverage
|
||||
@echo "Testing of coverage in the sources finished, look at the " \
|
||||
"results in $(BUILDDIR)/coverage/python.txt."
|
||||
|
||||
.PHONY: xml
|
||||
xml:
|
||||
$(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml
|
||||
@echo
|
||||
@echo "Build finished. The XML files are in $(BUILDDIR)/xml."
|
||||
|
||||
.PHONY: pseudoxml
|
||||
pseudoxml:
|
||||
$(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml
|
||||
@echo
|
||||
@echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml."
|
||||
|
Before Width: | Height: | Size: 443 KiB   After Width: | Height: | Size: 132 KiB
Before Width: | Height: | Size: 480 KiB
BIN  doc/build/doctrees/environment.pickle  vendored  Normal file
BIN  doc/build/doctrees/index.doctree  vendored  Normal file
4  doc/build/html/.buildinfo  vendored  Normal file
@@ -0,0 +1,4 @@
# Sphinx build info version 1
# This file hashes the configuration used when building these files. When it is not found, a full rebuild will be done.
config: fae7c9d3df0173e81358661e32fdb8fe
tags: 645f666f9bcd5a90fca523b33c5a78b7
22  doc/build/html/_sources/index.txt  vendored  Normal file
@@ -0,0 +1,22 @@
.. T-Pot documentation master file, created by
   sphinx-quickstart on Mon Aug 8 13:24:39 2016.
   You can adapt this file completely to your liking, but it should at least
   contain the root `toctree` directive.

Welcome to T-Pot's documentation!
=================================

Contents:

.. toctree::
   :maxdepth: 2



Indices and tables
==================

* :ref:`genindex`
* :ref:`modindex`
* :ref:`search`
BIN  doc/build/html/_static/ajax-loader.gif  vendored  Normal file
After Width: | Height: | Size: 673 B
599  doc/build/html/_static/basic.css  vendored  Normal file
@@ -0,0 +1,599 @@
/*
|
||||
* basic.css
|
||||
* ~~~~~~~~~
|
||||
*
|
||||
* Sphinx stylesheet -- basic theme.
|
||||
*
|
||||
* :copyright: Copyright 2007-2016 by the Sphinx team, see AUTHORS.
|
||||
* :license: BSD, see LICENSE for details.
|
||||
*
|
||||
*/
|
||||
|
||||
/* -- main layout ----------------------------------------------------------- */
|
||||
|
||||
div.clearer {
|
||||
clear: both;
|
||||
}
|
||||
|
||||
/* -- relbar ---------------------------------------------------------------- */
|
||||
|
||||
div.related {
|
||||
width: 100%;
|
||||
font-size: 90%;
|
||||
}
|
||||
|
||||
div.related h3 {
|
||||
display: none;
|
||||
}
|
||||
|
||||
div.related ul {
|
||||
margin: 0;
|
||||
padding: 0 0 0 10px;
|
||||
list-style: none;
|
||||
}
|
||||
|
||||
div.related li {
|
||||
display: inline;
|
||||
}
|
||||
|
||||
div.related li.right {
|
||||
float: right;
|
||||
margin-right: 5px;
|
||||
}
|
||||
|
||||
/* -- sidebar --------------------------------------------------------------- */
|
||||
|
||||
div.sphinxsidebarwrapper {
|
||||
padding: 10px 5px 0 10px;
|
||||
}
|
||||
|
||||
div.sphinxsidebar {
|
||||
float: left;
|
||||
width: 230px;
|
||||
margin-left: -100%;
|
||||
font-size: 90%;
|
||||
}
|
||||
|
||||
div.sphinxsidebar ul {
|
||||
list-style: none;
|
||||
}
|
||||
|
||||
div.sphinxsidebar ul ul,
|
||||
div.sphinxsidebar ul.want-points {
|
||||
margin-left: 20px;
|
||||
list-style: square;
|
||||
}
|
||||
|
||||
div.sphinxsidebar ul ul {
|
||||
margin-top: 0;
|
||||
margin-bottom: 0;
|
||||
}
|
||||
|
||||
div.sphinxsidebar form {
|
||||
margin-top: 10px;
|
||||
}
|
||||
|
||||
div.sphinxsidebar input {
|
||||
border: 1px solid #98dbcc;
|
||||
font-family: sans-serif;
|
||||
font-size: 1em;
|
||||
}
|
||||
|
||||
div.sphinxsidebar #searchbox input[type="text"] {
|
||||
width: 170px;
|
||||
}
|
||||
|
||||
div.sphinxsidebar #searchbox input[type="submit"] {
|
||||
width: 30px;
|
||||
}
|
||||
|
||||
img {
|
||||
border: 0;
|
||||
max-width: 100%;
|
||||
}
|
||||
|
||||
/* -- search page ----------------------------------------------------------- */
|
||||
|
||||
ul.search {
|
||||
margin: 10px 0 0 20px;
|
||||
padding: 0;
|
||||
}
|
||||
|
||||
ul.search li {
|
||||
padding: 5px 0 5px 20px;
|
||||
background-image: url(file.png);
|
||||
background-repeat: no-repeat;
|
||||
background-position: 0 7px;
|
||||
}
|
||||
|
||||
ul.search li a {
|
||||
font-weight: bold;
|
||||
}
|
||||
|
||||
ul.search li div.context {
|
||||
color: #888;
|
||||
margin: 2px 0 0 30px;
|
||||
text-align: left;
|
||||
}
|
||||
|
||||
ul.keywordmatches li.goodmatch a {
|
||||
font-weight: bold;
|
||||
}
|
||||
|
||||
/* -- index page ------------------------------------------------------------ */
|
||||
|
||||
table.contentstable {
|
||||
width: 90%;
|
||||
}
|
||||
|
||||
table.contentstable p.biglink {
|
||||
line-height: 150%;
|
||||
}
|
||||
|
||||
a.biglink {
|
||||
font-size: 1.3em;
|
||||
}
|
||||
|
||||
span.linkdescr {
|
||||
font-style: italic;
|
||||
padding-top: 5px;
|
||||
font-size: 90%;
|
||||
}
|
||||
|
||||
/* -- general index --------------------------------------------------------- */
|
||||
|
||||
table.indextable {
|
||||
width: 100%;
|
||||
}
|
||||
|
||||
table.indextable td {
|
||||
text-align: left;
|
||||
vertical-align: top;
|
||||
}
|
||||
|
||||
table.indextable dl, table.indextable dd {
|
||||
margin-top: 0;
|
||||
margin-bottom: 0;
|
||||
}
|
||||
|
||||
table.indextable tr.pcap {
|
||||
height: 10px;
|
||||
}
|
||||
|
||||
table.indextable tr.cap {
|
||||
margin-top: 10px;
|
||||
background-color: #f2f2f2;
|
||||
}
|
||||
|
||||
img.toggler {
|
||||
margin-right: 3px;
|
||||
margin-top: 3px;
|
||||
cursor: pointer;
|
||||
}
|
||||
|
||||
div.modindex-jumpbox {
|
||||
border-top: 1px solid #ddd;
|
||||
border-bottom: 1px solid #ddd;
|
||||
margin: 1em 0 1em 0;
|
||||
padding: 0.4em;
|
||||
}
|
||||
|
||||
div.genindex-jumpbox {
|
||||
border-top: 1px solid #ddd;
|
||||
border-bottom: 1px solid #ddd;
|
||||
margin: 1em 0 1em 0;
|
||||
padding: 0.4em;
|
||||
}
|
||||
|
||||
/* -- general body styles --------------------------------------------------- */
|
||||
|
||||
a.headerlink {
|
||||
visibility: hidden;
|
||||
}
|
||||
|
||||
h1:hover > a.headerlink,
|
||||
h2:hover > a.headerlink,
|
||||
h3:hover > a.headerlink,
|
||||
h4:hover > a.headerlink,
|
||||
h5:hover > a.headerlink,
|
||||
h6:hover > a.headerlink,
|
||||
dt:hover > a.headerlink,
|
||||
caption:hover > a.headerlink,
|
||||
p.caption:hover > a.headerlink,
|
||||
div.code-block-caption:hover > a.headerlink {
|
||||
visibility: visible;
|
||||
}
|
||||
|
||||
div.body p.caption {
|
||||
text-align: inherit;
|
||||
}
|
||||
|
||||
div.body td {
|
||||
text-align: left;
|
||||
}
|
||||
|
||||
.field-list ul {
|
||||
padding-left: 1em;
|
||||
}
|
||||
|
||||
.first {
|
||||
margin-top: 0 !important;
|
||||
}
|
||||
|
||||
p.rubric {
|
||||
margin-top: 30px;
|
||||
font-weight: bold;
|
||||
}
|
||||
|
||||
img.align-left, .figure.align-left, object.align-left {
|
||||
clear: left;
|
||||
float: left;
|
||||
margin-right: 1em;
|
||||
}
|
||||
|
||||
img.align-right, .figure.align-right, object.align-right {
|
||||
clear: right;
|
||||
float: right;
|
||||
margin-left: 1em;
|
||||
}
|
||||
|
||||
img.align-center, .figure.align-center, object.align-center {
|
||||
display: block;
|
||||
margin-left: auto;
|
||||
margin-right: auto;
|
||||
}
|
||||
|
||||
.align-left {
|
||||
text-align: left;
|
||||
}
|
||||
|
||||
.align-center {
|
||||
text-align: center;
|
||||
}
|
||||
|
||||
.align-right {
|
||||
text-align: right;
|
||||
}
|
||||
|
||||
/* -- sidebars -------------------------------------------------------------- */
|
||||
|
||||
div.sidebar {
|
||||
margin: 0 0 0.5em 1em;
|
||||
border: 1px solid #ddb;
|
||||
padding: 7px 7px 0 7px;
|
||||
background-color: #ffe;
|
||||
width: 40%;
|
||||
float: right;
|
||||
}
|
||||
|
||||
p.sidebar-title {
|
||||
font-weight: bold;
|
||||
}
|
||||
|
||||
/* -- topics ---------------------------------------------------------------- */
|
||||
|
||||
div.topic {
|
||||
border: 1px solid #ccc;
|
||||
padding: 7px 7px 0 7px;
|
||||
margin: 10px 0 10px 0;
|
||||
}
|
||||
|
||||
p.topic-title {
|
||||
font-size: 1.1em;
|
||||
font-weight: bold;
|
||||
margin-top: 10px;
|
||||
}
|
||||
|
||||
/* -- admonitions ----------------------------------------------------------- */
|
||||
|
||||
div.admonition {
|
||||
margin-top: 10px;
|
||||
margin-bottom: 10px;
|
||||
padding: 7px;
|
||||
}
|
||||
|
||||
div.admonition dt {
|
||||
font-weight: bold;
|
||||
}
|
||||
|
||||
div.admonition dl {
|
||||
margin-bottom: 0;
|
||||
}
|
||||
|
||||
p.admonition-title {
|
||||
margin: 0px 10px 5px 0px;
|
||||
font-weight: bold;
|
||||
}
|
||||
|
||||
div.body p.centered {
|
||||
text-align: center;
|
||||
margin-top: 25px;
|
||||
}
|
||||
|
||||
/* -- tables ---------------------------------------------------------------- */
|
||||
|
||||
table.docutils {
|
||||
border: 0;
|
||||
border-collapse: collapse;
|
||||
}
|
||||
|
||||
table caption span.caption-number {
|
||||
font-style: italic;
|
||||
}
|
||||
|
||||
table caption span.caption-text {
|
||||
}
|
||||
|
||||
table.docutils td, table.docutils th {
|
||||
padding: 1px 8px 1px 5px;
|
||||
border-top: 0;
|
||||
border-left: 0;
|
||||
border-right: 0;
|
||||
border-bottom: 1px solid #aaa;
|
||||
}
|
||||
|
||||
table.field-list td, table.field-list th {
|
||||
border: 0 !important;
|
||||
}
|
||||
|
||||
table.footnote td, table.footnote th {
|
||||
border: 0 !important;
|
||||
}
|
||||
|
||||
th {
|
||||
text-align: left;
|
||||
padding-right: 5px;
|
||||
}
|
||||
|
||||
table.citation {
|
||||
border-left: solid 1px gray;
|
||||
margin-left: 1px;
|
||||
}
|
||||
|
||||
table.citation td {
|
||||
border-bottom: none;
|
||||
}
|
||||
|
||||
/* -- figures --------------------------------------------------------------- */
|
||||
|
||||
div.figure {
|
||||
margin: 0.5em;
|
||||
padding: 0.5em;
|
||||
}
|
||||
|
||||
div.figure p.caption {
|
||||
padding: 0.3em;
|
||||
}
|
||||
|
||||
div.figure p.caption span.caption-number {
|
||||
font-style: italic;
|
||||
}
|
||||
|
||||
div.figure p.caption span.caption-text {
|
||||
}
|
||||
|
||||
|
||||
/* -- other body styles ----------------------------------------------------- */
|
||||
|
||||
ol.arabic {
|
||||
list-style: decimal;
|
||||
}
|
||||
|
||||
ol.loweralpha {
|
||||
list-style: lower-alpha;
|
||||
}
|
||||
|
||||
ol.upperalpha {
|
||||
list-style: upper-alpha;
|
||||
}
|
||||
|
||||
ol.lowerroman {
|
||||
list-style: lower-roman;
|
||||
}
|
||||
|
||||
ol.upperroman {
|
||||
list-style: upper-roman;
|
||||
}
|
||||
|
||||
dl {
|
||||
margin-bottom: 15px;
|
||||
}
|
||||
|
||||
dd p {
|
||||
margin-top: 0px;
|
||||
}
|
||||
|
||||
dd ul, dd table {
|
||||
margin-bottom: 10px;
|
||||
}
|
||||
|
||||
dd {
|
||||
margin-top: 3px;
|
||||
margin-bottom: 10px;
|
||||
margin-left: 30px;
|
||||
}
|
||||
|
||||
dt:target, .highlighted {
|
||||
background-color: #fbe54e;
|
||||
}
|
||||
|
||||
dl.glossary dt {
|
||||
font-weight: bold;
|
||||
font-size: 1.1em;
|
||||
}
|
||||
|
||||
.field-list ul {
|
||||
margin: 0;
|
||||
padding-left: 1em;
|
||||
}
|
||||
|
||||
.field-list p {
|
||||
margin: 0;
|
||||
}
|
||||
|
||||
.optional {
|
||||
font-size: 1.3em;
|
||||
}
|
||||
|
||||
.sig-paren {
|
||||
font-size: larger;
|
||||
}
|
||||
|
||||
.versionmodified {
|
||||
font-style: italic;
|
||||
}
|
||||
|
||||
.system-message {
|
||||
background-color: #fda;
|
||||
padding: 5px;
|
||||
border: 3px solid red;
|
||||
}
|
||||
|
||||
.footnote:target {
|
||||
background-color: #ffa;
|
||||
}
|
||||
|
||||
.line-block {
|
||||
display: block;
|
||||
margin-top: 1em;
|
||||
margin-bottom: 1em;
|
||||
}
|
||||
|
||||
.line-block .line-block {
|
||||
margin-top: 0;
|
||||
margin-bottom: 0;
|
||||
margin-left: 1.5em;
|
||||
}
|
||||
|
||||
.guilabel, .menuselection {
|
||||
font-family: sans-serif;
|
||||
}
|
||||
|
||||
.accelerator {
|
||||
text-decoration: underline;
|
||||
}
|
||||
|
||||
.classifier {
|
||||
font-style: oblique;
|
||||
}
|
||||
|
||||
abbr, acronym {
|
||||
border-bottom: dotted 1px;
|
||||
cursor: help;
|
||||
}
|
||||
|
||||
/* -- code displays --------------------------------------------------------- */
|
||||
|
||||
pre {
|
||||
overflow: auto;
|
||||
overflow-y: hidden; /* fixes display issues on Chrome browsers */
|
||||
}
|
||||
|
||||
td.linenos pre {
|
||||
padding: 5px 0px;
|
||||
border: 0;
|
||||
background-color: transparent;
|
||||
color: #aaa;
|
||||
}
|
||||
|
||||
table.highlighttable {
|
||||
margin-left: 0.5em;
|
||||
}
|
||||
|
||||
table.highlighttable td {
|
||||
padding: 0 0.5em 0 0.5em;
|
||||
}
|
||||
|
||||
div.code-block-caption {
|
||||
padding: 2px 5px;
|
||||
font-size: small;
|
||||
}
|
||||
|
||||
div.code-block-caption code {
|
||||
background-color: transparent;
|
||||
}
|
||||
|
||||
div.code-block-caption + div > div.highlight > pre {
|
||||
margin-top: 0;
|
||||
}
|
||||
|
||||
div.code-block-caption span.caption-number {
|
||||
padding: 0.1em 0.3em;
|
||||
font-style: italic;
|
||||
}
|
||||
|
||||
div.code-block-caption span.caption-text {
|
||||
}
|
||||
|
||||
div.literal-block-wrapper {
|
||||
padding: 1em 1em 0;
|
||||
}
|
||||
|
||||
div.literal-block-wrapper div.highlight {
|
||||
margin: 0;
|
||||
}
|
||||
|
||||
code.descname {
|
||||
background-color: transparent;
|
||||
font-weight: bold;
|
||||
font-size: 1.2em;
|
||||
}
|
||||
|
||||
code.descclassname {
|
||||
background-color: transparent;
|
||||
}
|
||||
|
||||
code.xref, a code {
|
||||
background-color: transparent;
|
||||
font-weight: bold;
|
||||
}
|
||||
|
||||
h1 code, h2 code, h3 code, h4 code, h5 code, h6 code {
|
||||
background-color: transparent;
|
||||
}
|
||||
|
||||
.viewcode-link {
|
||||
float: right;
|
||||
}
|
||||
|
||||
.viewcode-back {
|
||||
float: right;
|
||||
font-family: sans-serif;
|
||||
}
|
||||
|
||||
div.viewcode-block:target {
|
||||
margin: -1px -10px;
|
||||
padding: 0 10px;
|
||||
}
|
||||
|
||||
/* -- math display ---------------------------------------------------------- */
|
||||
|
||||
img.math {
|
||||
vertical-align: middle;
|
||||
}
|
||||
|
||||
div.body div.math p {
|
||||
text-align: center;
|
||||
}
|
||||
|
||||
span.eqno {
|
||||
float: right;
|
||||
}
|
||||
|
||||
/* -- printout stylesheet --------------------------------------------------- */
|
||||
|
||||
@media print {
|
||||
div.document,
|
||||
div.documentwrapper,
|
||||
div.bodywrapper {
|
||||
margin: 0 !important;
|
||||
width: 100%;
|
||||
}
|
||||
|
||||
div.sphinxsidebar,
|
||||
div.related,
|
||||
div.footer,
|
||||
#top-link {
|
||||
display: none;
|
||||
}
|
||||
}
|
||||
261  doc/build/html/_static/classic.css  vendored  Normal file
@@ -0,0 +1,261 @@
/*
|
||||
* default.css_t
|
||||
* ~~~~~~~~~~~~~
|
||||
*
|
||||
* Sphinx stylesheet -- default theme.
|
||||
*
|
||||
* :copyright: Copyright 2007-2016 by the Sphinx team, see AUTHORS.
|
||||
* :license: BSD, see LICENSE for details.
|
||||
*
|
||||
*/
|
||||
|
||||
@import url("basic.css");
|
||||
|
||||
/* -- page layout ----------------------------------------------------------- */
|
||||
|
||||
body {
|
||||
font-family: sans-serif;
|
||||
font-size: 100%;
|
||||
background-color: #11303d;
|
||||
color: #000;
|
||||
margin: 0;
|
||||
padding: 0;
|
||||
}
|
||||
|
||||
div.document {
|
||||
background-color: #1c4e63;
|
||||
}
|
||||
|
||||
div.documentwrapper {
|
||||
float: left;
|
||||
width: 100%;
|
||||
}
|
||||
|
||||
div.bodywrapper {
|
||||
margin: 0 0 0 230px;
|
||||
}
|
||||
|
||||
div.body {
|
||||
background-color: #ffffff;
|
||||
color: #000000;
|
||||
padding: 0 20px 30px 20px;
|
||||
}
|
||||
|
||||
div.footer {
|
||||
color: #ffffff;
|
||||
width: 100%;
|
||||
padding: 9px 0 9px 0;
|
||||
text-align: center;
|
||||
font-size: 75%;
|
||||
}
|
||||
|
||||
div.footer a {
|
||||
color: #ffffff;
|
||||
text-decoration: underline;
|
||||
}
|
||||
|
||||
div.related {
|
||||
background-color: #133f52;
|
||||
line-height: 30px;
|
||||
color: #ffffff;
|
||||
}
|
||||
|
||||
div.related a {
|
||||
color: #ffffff;
|
||||
}
|
||||
|
||||
div.sphinxsidebar {
|
||||
}
|
||||
|
||||
div.sphinxsidebar h3 {
|
||||
font-family: 'Trebuchet MS', sans-serif;
|
||||
color: #ffffff;
|
||||
font-size: 1.4em;
|
||||
font-weight: normal;
|
||||
margin: 0;
|
||||
padding: 0;
|
||||
}
|
||||
|
||||
div.sphinxsidebar h3 a {
|
||||
color: #ffffff;
|
||||
}
|
||||
|
||||
div.sphinxsidebar h4 {
|
||||
font-family: 'Trebuchet MS', sans-serif;
|
||||
color: #ffffff;
|
||||
font-size: 1.3em;
|
||||
font-weight: normal;
|
||||
margin: 5px 0 0 0;
|
||||
padding: 0;
|
||||
}
|
||||
|
||||
div.sphinxsidebar p {
|
||||
color: #ffffff;
|
||||
}
|
||||
|
||||
div.sphinxsidebar p.topless {
|
||||
margin: 5px 10px 10px 10px;
|
||||
}
|
||||
|
||||
div.sphinxsidebar ul {
|
||||
margin: 10px;
|
||||
padding: 0;
|
||||
color: #ffffff;
|
||||
}
|
||||
|
||||
div.sphinxsidebar a {
|
||||
color: #98dbcc;
|
||||
}
|
||||
|
||||
div.sphinxsidebar input {
|
||||
border: 1px solid #98dbcc;
|
||||
font-family: sans-serif;
|
||||
font-size: 1em;
|
||||
}
|
||||
|
||||
|
||||
|
||||
/* -- hyperlink styles ------------------------------------------------------ */
|
||||
|
||||
a {
|
||||
color: #355f7c;
|
||||
text-decoration: none;
|
||||
}
|
||||
|
||||
a:visited {
|
||||
color: #355f7c;
|
||||
text-decoration: none;
|
||||
}
|
||||
|
||||
a:hover {
|
||||
text-decoration: underline;
|
||||
}
|
||||
|
||||
|
||||
|
||||
/* -- body styles ----------------------------------------------------------- */
|
||||
|
||||
div.body h1,
|
||||
div.body h2,
|
||||
div.body h3,
|
||||
div.body h4,
|
||||
div.body h5,
|
||||
div.body h6 {
|
||||
font-family: 'Trebuchet MS', sans-serif;
|
||||
background-color: #f2f2f2;
|
||||
font-weight: normal;
|
||||
color: #20435c;
|
||||
border-bottom: 1px solid #ccc;
|
||||
margin: 20px -20px 10px -20px;
|
||||
padding: 3px 0 3px 10px;
|
||||
}
|
||||
|
||||
div.body h1 { margin-top: 0; font-size: 200%; }
|
||||
div.body h2 { font-size: 160%; }
|
||||
div.body h3 { font-size: 140%; }
|
||||
div.body h4 { font-size: 120%; }
|
||||
div.body h5 { font-size: 110%; }
|
||||
div.body h6 { font-size: 100%; }
|
||||
|
||||
a.headerlink {
|
||||
color: #c60f0f;
|
||||
font-size: 0.8em;
|
||||
padding: 0 4px 0 4px;
|
||||
text-decoration: none;
|
||||
}
|
||||
|
||||
a.headerlink:hover {
|
||||
background-color: #c60f0f;
|
||||
color: white;
|
||||
}
|
||||
|
||||
div.body p, div.body dd, div.body li, div.body blockquote {
|
||||
text-align: justify;
|
||||
line-height: 130%;
|
||||
}
|
||||
|
||||
div.admonition p.admonition-title + p {
|
||||
display: inline;
|
||||
}
|
||||
|
||||
div.admonition p {
|
||||
margin-bottom: 5px;
|
||||
}
|
||||
|
||||
div.admonition pre {
|
||||
margin-bottom: 5px;
|
||||
}
|
||||
|
||||
div.admonition ul, div.admonition ol {
|
||||
margin-bottom: 5px;
|
||||
}
|
||||
|
||||
div.note {
|
||||
background-color: #eee;
|
||||
border: 1px solid #ccc;
|
||||
}
|
||||
|
||||
div.seealso {
|
||||
background-color: #ffc;
|
||||
border: 1px solid #ff6;
|
||||
}
|
||||
|
||||
div.topic {
|
||||
background-color: #eee;
|
||||
}
|
||||
|
||||
div.warning {
|
||||
background-color: #ffe4e4;
|
||||
border: 1px solid #f66;
|
||||
}
|
||||
|
||||
p.admonition-title {
|
||||
display: inline;
|
||||
}
|
||||
|
||||
p.admonition-title:after {
|
||||
content: ":";
|
||||
}
|
||||
|
||||
pre {
|
||||
padding: 5px;
|
||||
background-color: #eeffcc;
|
||||
color: #333333;
|
||||
line-height: 120%;
|
||||
border: 1px solid #ac9;
|
||||
border-left: none;
|
||||
border-right: none;
|
||||
}
|
||||
|
||||
code {
|
||||
background-color: #ecf0f3;
|
||||
padding: 0 1px 0 1px;
|
||||
font-size: 0.95em;
|
||||
}
|
||||
|
||||
th {
|
||||
background-color: #ede;
|
||||
}
|
||||
|
||||
.warning code {
|
||||
background: #efc2c2;
|
||||
}
|
||||
|
||||
.note code {
|
||||
background: #d6d6d6;
|
||||
}
|
||||
|
||||
.viewcode-back {
|
||||
font-family: sans-serif;
|
||||
}
|
||||
|
||||
div.viewcode-block:target {
|
||||
background-color: #f4debf;
|
||||
border-top: 1px solid #ac9;
|
||||
border-bottom: 1px solid #ac9;
|
||||
}
|
||||
|
||||
div.code-block-caption {
|
||||
color: #efefef;
|
||||
background-color: #1c4e63;
|
||||
}
|
||||
BIN  doc/build/html/_static/comment-bright.png  vendored  Normal file
After Width: | Height: | Size: 3.4 KiB
BIN  doc/build/html/_static/comment-close.png  vendored  Normal file
After Width: | Height: | Size: 3.5 KiB
BIN  doc/build/html/_static/comment.png  vendored  Normal file
After Width: | Height: | Size: 3.4 KiB
1  doc/build/html/_static/default.css  vendored  Normal file
@@ -0,0 +1 @@
@import url("classic.css");
263  doc/build/html/_static/doctools.js  vendored  Normal file
@@ -0,0 +1,263 @@
/*
|
||||
* doctools.js
|
||||
* ~~~~~~~~~~~
|
||||
*
|
||||
* Sphinx JavaScript utilities for all documentation.
|
||||
*
|
||||
* :copyright: Copyright 2007-2016 by the Sphinx team, see AUTHORS.
|
||||
* :license: BSD, see LICENSE for details.
|
||||
*
|
||||
*/
|
||||
|
||||
/**
|
||||
* select a different prefix for underscore
|
||||
*/
|
||||
$u = _.noConflict();
|
||||
|
||||
/**
|
||||
* make the code below compatible with browsers without
|
||||
* an installed firebug like debugger
|
||||
if (!window.console || !console.firebug) {
|
||||
var names = ["log", "debug", "info", "warn", "error", "assert", "dir",
|
||||
"dirxml", "group", "groupEnd", "time", "timeEnd", "count", "trace",
|
||||
"profile", "profileEnd"];
|
||||
window.console = {};
|
||||
for (var i = 0; i < names.length; ++i)
|
||||
window.console[names[i]] = function() {};
|
||||
}
|
||||
*/
|
||||
|
||||
/**
|
||||
* small helper function to urldecode strings
|
||||
*/
|
||||
jQuery.urldecode = function(x) {
|
||||
return decodeURIComponent(x).replace(/\+/g, ' ');
|
||||
};
|
||||
|
||||
/**
|
||||
* small helper function to urlencode strings
|
||||
*/
|
||||
jQuery.urlencode = encodeURIComponent;
|
||||
|
||||
/**
|
||||
* This function returns the parsed url parameters of the
|
||||
* current request. Multiple values per key are supported,
|
||||
* it will always return arrays of strings for the value parts.
|
||||
*/
|
||||
jQuery.getQueryParameters = function(s) {
|
||||
if (typeof s == 'undefined')
|
||||
s = document.location.search;
|
||||
var parts = s.substr(s.indexOf('?') + 1).split('&');
|
||||
var result = {};
|
||||
for (var i = 0; i < parts.length; i++) {
|
||||
var tmp = parts[i].split('=', 2);
|
||||
var key = jQuery.urldecode(tmp[0]);
|
||||
var value = jQuery.urldecode(tmp[1]);
|
||||
if (key in result)
|
||||
result[key].push(value);
|
||||
else
|
||||
result[key] = [value];
|
||||
}
|
||||
return result;
|
||||
};
|
||||
|
||||
/**
|
||||
* highlight a given string on a jquery object by wrapping it in
|
||||
* span elements with the given class name.
|
||||
*/
|
||||
jQuery.fn.highlightText = function(text, className) {
|
||||
function highlight(node) {
|
||||
if (node.nodeType == 3) {
|
||||
var val = node.nodeValue;
|
||||
var pos = val.toLowerCase().indexOf(text);
|
||||
if (pos >= 0 && !jQuery(node.parentNode).hasClass(className)) {
|
||||
var span = document.createElement("span");
|
||||
span.className = className;
|
||||
span.appendChild(document.createTextNode(val.substr(pos, text.length)));
|
||||
node.parentNode.insertBefore(span, node.parentNode.insertBefore(
|
||||
document.createTextNode(val.substr(pos + text.length)),
|
||||
node.nextSibling));
|
||||
node.nodeValue = val.substr(0, pos);
|
||||
}
|
||||
}
|
||||
else if (!jQuery(node).is("button, select, textarea")) {
|
||||
jQuery.each(node.childNodes, function() {
|
||||
highlight(this);
|
||||
});
|
||||
}
|
||||
}
|
||||
return this.each(function() {
|
||||
highlight(this);
|
||||
});
|
||||
};
|
||||
|
||||
/*
|
||||
* backward compatibility for jQuery.browser
|
||||
* This will be supported until firefox bug is fixed.
|
||||
*/
|
||||
if (!jQuery.browser) {
|
||||
jQuery.uaMatch = function(ua) {
|
||||
ua = ua.toLowerCase();
|
||||
|
||||
var match = /(chrome)[ \/]([\w.]+)/.exec(ua) ||
|
||||
/(webkit)[ \/]([\w.]+)/.exec(ua) ||
|
||||
/(opera)(?:.*version|)[ \/]([\w.]+)/.exec(ua) ||
|
||||
/(msie) ([\w.]+)/.exec(ua) ||
|
||||
ua.indexOf("compatible") < 0 && /(mozilla)(?:.*? rv:([\w.]+)|)/.exec(ua) ||
|
||||
[];
|
||||
|
||||
return {
|
||||
browser: match[ 1 ] || "",
|
||||
version: match[ 2 ] || "0"
|
||||
};
|
||||
};
|
||||
jQuery.browser = {};
|
||||
jQuery.browser[jQuery.uaMatch(navigator.userAgent).browser] = true;
|
||||
}
|
||||
|
||||
/**
|
||||
* Small JavaScript module for the documentation.
|
||||
*/
|
||||
var Documentation = {
|
||||
|
||||
init : function() {
|
||||
this.fixFirefoxAnchorBug();
|
||||
this.highlightSearchWords();
|
||||
this.initIndexTable();
|
||||
},
|
||||
|
||||
/**
|
||||
* i18n support
|
||||
*/
|
||||
TRANSLATIONS : {},
|
||||
PLURAL_EXPR : function(n) { return n == 1 ? 0 : 1; },
|
||||
LOCALE : 'unknown',
|
||||
|
||||
// gettext and ngettext don't access this so that the functions
|
||||
// can safely bound to a different name (_ = Documentation.gettext)
|
||||
gettext : function(string) {
|
||||
var translated = Documentation.TRANSLATIONS[string];
|
||||
if (typeof translated == 'undefined')
|
||||
return string;
|
||||
return (typeof translated == 'string') ? translated : translated[0];
|
||||
},
|
||||
|
||||
ngettext : function(singular, plural, n) {
|
||||
var translated = Documentation.TRANSLATIONS[singular];
|
||||
if (typeof translated == 'undefined')
|
||||
return (n == 1) ? singular : plural;
|
||||
return translated[Documentation.PLURALEXPR(n)];
|
||||
},
|
||||
|
||||
addTranslations : function(catalog) {
|
||||
for (var key in catalog.messages)
|
||||
this.TRANSLATIONS[key] = catalog.messages[key];
|
||||
this.PLURAL_EXPR = new Function('n', 'return +(' + catalog.plural_expr + ')');
|
||||
this.LOCALE = catalog.locale;
|
||||
},
|
||||
|
||||
/**
|
||||
* add context elements like header anchor links
|
||||
*/
|
||||
addContextElements : function() {
|
||||
$('div[id] > :header:first').each(function() {
|
||||
$('<a class="headerlink">\u00B6</a>').
|
||||
attr('href', '#' + this.id).
|
||||
attr('title', _('Permalink to this headline')).
|
||||
appendTo(this);
|
||||
});
|
||||
$('dt[id]').each(function() {
|
||||
$('<a class="headerlink">\u00B6</a>').
|
||||
attr('href', '#' + this.id).
|
||||
attr('title', _('Permalink to this definition')).
|
||||
appendTo(this);
|
||||
});
|
||||
},
|
||||
|
||||
/**
|
||||
* workaround a firefox stupidity
|
||||
* see: https://bugzilla.mozilla.org/show_bug.cgi?id=645075
|
||||
*/
|
||||
fixFirefoxAnchorBug : function() {
|
||||
if (document.location.hash)
|
||||
window.setTimeout(function() {
|
||||
document.location.href += '';
|
||||
}, 10);
|
||||
},
|
||||
|
||||
/**
|
||||
* highlight the search words provided in the url in the text
|
||||
*/
|
||||
highlightSearchWords : function() {
|
||||
var params = $.getQueryParameters();
|
||||
var terms = (params.highlight) ? params.highlight[0].split(/\s+/) : [];
|
||||
if (terms.length) {
|
||||
var body = $('div.body');
|
||||
if (!body.length) {
|
||||
body = $('body');
|
||||
}
|
||||
window.setTimeout(function() {
|
||||
$.each(terms, function() {
|
||||
body.highlightText(this.toLowerCase(), 'highlighted');
|
||||
});
|
||||
}, 10);
|
||||
$('<p class="highlight-link"><a href="javascript:Documentation.' +
|
||||
'hideSearchWords()">' + _('Hide Search Matches') + '</a></p>')
|
||||
.appendTo($('#searchbox'));
|
||||
}
|
||||
},
|
||||
|
||||
/**
|
||||
* init the domain index toggle buttons
|
||||
*/
|
||||
initIndexTable : function() {
|
||||
var togglers = $('img.toggler').click(function() {
|
||||
var src = $(this).attr('src');
|
||||
var idnum = $(this).attr('id').substr(7);
|
||||
$('tr.cg-' + idnum).toggle();
|
||||
if (src.substr(-9) == 'minus.png')
|
||||
$(this).attr('src', src.substr(0, src.length-9) + 'plus.png');
|
||||
else
|
||||
$(this).attr('src', src.substr(0, src.length-8) + 'minus.png');
|
||||
}).css('display', '');
|
||||
if (DOCUMENTATION_OPTIONS.COLLAPSE_INDEX) {
|
||||
togglers.click();
|
||||
}
|
||||
},
|
||||
|
||||
/**
|
||||
* helper function to hide the search marks again
|
||||
*/
|
||||
hideSearchWords : function() {
|
||||
$('#searchbox .highlight-link').fadeOut(300);
|
||||
$('span.highlighted').removeClass('highlighted');
|
||||
},
|
||||
|
||||
/**
|
||||
* make the url absolute
|
||||
*/
|
||||
makeURL : function(relativeURL) {
|
||||
return DOCUMENTATION_OPTIONS.URL_ROOT + '/' + relativeURL;
|
||||
},
|
||||
|
||||
/**
|
||||
* get the current relative url
|
||||
*/
|
||||
getCurrentURL : function() {
|
||||
var path = document.location.pathname;
|
||||
var parts = path.split(/\//);
|
||||
$.each(DOCUMENTATION_OPTIONS.URL_ROOT.split(/\//), function() {
|
||||
if (this == '..')
|
||||
parts.pop();
|
||||
});
|
||||
var url = parts.join('/');
|
||||
return path.substring(url.lastIndexOf('/') + 1, path.length - 1);
|
||||
}
|
||||
};
|
||||
|
||||
// quick alias for translations
|
||||
_ = Documentation.gettext;
|
||||
|
||||
$(document).ready(function() {
|
||||
Documentation.init();
|
||||
});
|
||||
BIN  doc/build/html/_static/down-pressed.png  vendored  Normal file
After Width: | Height: | Size: 347 B
BIN  doc/build/html/_static/down.png  vendored  Normal file
After Width: | Height: | Size: 347 B
BIN  doc/build/html/_static/file.png  vendored  Normal file
After Width: | Height: | Size: 358 B
10351  doc/build/html/_static/jquery.js  vendored  Normal file
BIN  doc/build/html/_static/minus.png  vendored  Normal file
After Width: | Height: | Size: 173 B
BIN  doc/build/html/_static/plus.png  vendored  Normal file
After Width: | Height: | Size: 173 B
65  doc/build/html/_static/pygments.css  vendored  Normal file
@@ -0,0 +1,65 @@
.highlight .hll { background-color: #ffffcc }
|
||||
.highlight { background: #eeffcc; }
|
||||
.highlight .c { color: #408090; font-style: italic } /* Comment */
|
||||
.highlight .err { border: 1px solid #FF0000 } /* Error */
|
||||
.highlight .k { color: #007020; font-weight: bold } /* Keyword */
|
||||
.highlight .o { color: #666666 } /* Operator */
|
||||
.highlight .ch { color: #408090; font-style: italic } /* Comment.Hashbang */
|
||||
.highlight .cm { color: #408090; font-style: italic } /* Comment.Multiline */
|
||||
.highlight .cp { color: #007020 } /* Comment.Preproc */
|
||||
.highlight .cpf { color: #408090; font-style: italic } /* Comment.PreprocFile */
|
||||
.highlight .c1 { color: #408090; font-style: italic } /* Comment.Single */
|
||||
.highlight .cs { color: #408090; background-color: #fff0f0 } /* Comment.Special */
|
||||
.highlight .gd { color: #A00000 } /* Generic.Deleted */
|
||||
.highlight .ge { font-style: italic } /* Generic.Emph */
|
||||
.highlight .gr { color: #FF0000 } /* Generic.Error */
|
||||
.highlight .gh { color: #000080; font-weight: bold } /* Generic.Heading */
|
||||
.highlight .gi { color: #00A000 } /* Generic.Inserted */
|
||||
.highlight .go { color: #333333 } /* Generic.Output */
|
||||
.highlight .gp { color: #c65d09; font-weight: bold } /* Generic.Prompt */
|
||||
.highlight .gs { font-weight: bold } /* Generic.Strong */
|
||||
.highlight .gu { color: #800080; font-weight: bold } /* Generic.Subheading */
|
||||
.highlight .gt { color: #0044DD } /* Generic.Traceback */
|
||||
.highlight .kc { color: #007020; font-weight: bold } /* Keyword.Constant */
|
||||
.highlight .kd { color: #007020; font-weight: bold } /* Keyword.Declaration */
|
||||
.highlight .kn { color: #007020; font-weight: bold } /* Keyword.Namespace */
|
||||
.highlight .kp { color: #007020 } /* Keyword.Pseudo */
|
||||
.highlight .kr { color: #007020; font-weight: bold } /* Keyword.Reserved */
|
||||
.highlight .kt { color: #902000 } /* Keyword.Type */
|
||||
.highlight .m { color: #208050 } /* Literal.Number */
|
||||
.highlight .s { color: #4070a0 } /* Literal.String */
|
||||
.highlight .na { color: #4070a0 } /* Name.Attribute */
|
||||
.highlight .nb { color: #007020 } /* Name.Builtin */
|
||||
.highlight .nc { color: #0e84b5; font-weight: bold } /* Name.Class */
|
||||
.highlight .no { color: #60add5 } /* Name.Constant */
|
||||
.highlight .nd { color: #555555; font-weight: bold } /* Name.Decorator */
|
||||
.highlight .ni { color: #d55537; font-weight: bold } /* Name.Entity */
|
||||
.highlight .ne { color: #007020 } /* Name.Exception */
|
||||
.highlight .nf { color: #06287e } /* Name.Function */
|
||||
.highlight .nl { color: #002070; font-weight: bold } /* Name.Label */
|
||||
.highlight .nn { color: #0e84b5; font-weight: bold } /* Name.Namespace */
|
||||
.highlight .nt { color: #062873; font-weight: bold } /* Name.Tag */
|
||||
.highlight .nv { color: #bb60d5 } /* Name.Variable */
|
||||
.highlight .ow { color: #007020; font-weight: bold } /* Operator.Word */
|
||||
.highlight .w { color: #bbbbbb } /* Text.Whitespace */
|
||||
.highlight .mb { color: #208050 } /* Literal.Number.Bin */
|
||||
.highlight .mf { color: #208050 } /* Literal.Number.Float */
|
||||
.highlight .mh { color: #208050 } /* Literal.Number.Hex */
|
||||
.highlight .mi { color: #208050 } /* Literal.Number.Integer */
|
||||
.highlight .mo { color: #208050 } /* Literal.Number.Oct */
|
||||
.highlight .sb { color: #4070a0 } /* Literal.String.Backtick */
|
||||
.highlight .sc { color: #4070a0 } /* Literal.String.Char */
|
||||
.highlight .sd { color: #4070a0; font-style: italic } /* Literal.String.Doc */
|
||||
.highlight .s2 { color: #4070a0 } /* Literal.String.Double */
|
||||
.highlight .se { color: #4070a0; font-weight: bold } /* Literal.String.Escape */
|
||||
.highlight .sh { color: #4070a0 } /* Literal.String.Heredoc */
|
||||
.highlight .si { color: #70a0d0; font-style: italic } /* Literal.String.Interpol */
|
||||
.highlight .sx { color: #c65d09 } /* Literal.String.Other */
|
||||
.highlight .sr { color: #235388 } /* Literal.String.Regex */
|
||||
.highlight .s1 { color: #4070a0 } /* Literal.String.Single */
|
||||
.highlight .ss { color: #517918 } /* Literal.String.Symbol */
|
||||
.highlight .bp { color: #007020 } /* Name.Builtin.Pseudo */
|
||||
.highlight .vc { color: #bb60d5 } /* Name.Variable.Class */
|
||||
.highlight .vg { color: #bb60d5 } /* Name.Variable.Global */
|
||||
.highlight .vi { color: #bb60d5 } /* Name.Variable.Instance */
|
||||
.highlight .il { color: #208050 } /* Literal.Number.Integer.Long */
|
||||
651  doc/build/html/_static/searchtools.js  vendored  Normal file
@@ -0,0 +1,651 @@
/*
|
||||
* searchtools.js_t
|
||||
* ~~~~~~~~~~~~~~~~
|
||||
*
|
||||
* Sphinx JavaScript utilties for the full-text search.
|
||||
*
|
||||
* :copyright: Copyright 2007-2016 by the Sphinx team, see AUTHORS.
|
||||
* :license: BSD, see LICENSE for details.
|
||||
*
|
||||
*/
|
||||
|
||||
|
||||
/* Non-minified version JS is _stemmer.js if file is provided */
|
||||
/**
|
||||
* Porter Stemmer
|
||||
*/
|
||||
var Stemmer = function() {
|
||||
|
||||
var step2list = {
|
||||
ational: 'ate',
|
||||
tional: 'tion',
|
||||
enci: 'ence',
|
||||
anci: 'ance',
|
||||
izer: 'ize',
|
||||
bli: 'ble',
|
||||
alli: 'al',
|
||||
entli: 'ent',
|
||||
eli: 'e',
|
||||
ousli: 'ous',
|
||||
ization: 'ize',
|
||||
ation: 'ate',
|
||||
ator: 'ate',
|
||||
alism: 'al',
|
||||
iveness: 'ive',
|
||||
fulness: 'ful',
|
||||
ousness: 'ous',
|
||||
aliti: 'al',
|
||||
iviti: 'ive',
|
||||
biliti: 'ble',
|
||||
logi: 'log'
|
||||
};
|
||||
|
||||
var step3list = {
|
||||
icate: 'ic',
|
||||
ative: '',
|
||||
alize: 'al',
|
||||
iciti: 'ic',
|
||||
ical: 'ic',
|
||||
ful: '',
|
||||
ness: ''
|
||||
};
|
||||
|
||||
var c = "[^aeiou]"; // consonant
|
||||
var v = "[aeiouy]"; // vowel
|
||||
var C = c + "[^aeiouy]*"; // consonant sequence
|
||||
var V = v + "[aeiou]*"; // vowel sequence
|
||||
|
||||
var mgr0 = "^(" + C + ")?" + V + C; // [C]VC... is m>0
|
||||
var meq1 = "^(" + C + ")?" + V + C + "(" + V + ")?$"; // [C]VC[V] is m=1
|
||||
var mgr1 = "^(" + C + ")?" + V + C + V + C; // [C]VCVC... is m>1
|
||||
var s_v = "^(" + C + ")?" + v; // vowel in stem
|
||||
|
||||
this.stemWord = function (w) {
|
||||
var stem;
|
||||
var suffix;
|
||||
var firstch;
|
||||
var origword = w;
|
||||
|
||||
if (w.length < 3)
|
||||
return w;
|
||||
|
||||
var re;
|
||||
var re2;
|
||||
var re3;
|
||||
var re4;
|
||||
|
||||
firstch = w.substr(0,1);
|
||||
if (firstch == "y")
|
||||
w = firstch.toUpperCase() + w.substr(1);
|
||||
|
||||
// Step 1a
|
||||
re = /^(.+?)(ss|i)es$/;
|
||||
re2 = /^(.+?)([^s])s$/;
|
||||
|
||||
if (re.test(w))
|
||||
w = w.replace(re,"$1$2");
|
||||
else if (re2.test(w))
|
||||
w = w.replace(re2,"$1$2");
|
||||
|
||||
// Step 1b
|
||||
re = /^(.+?)eed$/;
|
||||
re2 = /^(.+?)(ed|ing)$/;
|
||||
if (re.test(w)) {
|
||||
var fp = re.exec(w);
|
||||
re = new RegExp(mgr0);
|
||||
if (re.test(fp[1])) {
|
||||
re = /.$/;
|
||||
w = w.replace(re,"");
|
||||
}
|
||||
}
|
||||
else if (re2.test(w)) {
|
||||
var fp = re2.exec(w);
|
||||
stem = fp[1];
|
||||
re2 = new RegExp(s_v);
|
||||
if (re2.test(stem)) {
|
||||
w = stem;
|
||||
re2 = /(at|bl|iz)$/;
|
||||
re3 = new RegExp("([^aeiouylsz])\\1$");
|
||||
re4 = new RegExp("^" + C + v + "[^aeiouwxy]$");
|
||||
if (re2.test(w))
|
||||
w = w + "e";
|
||||
else if (re3.test(w)) {
|
||||
re = /.$/;
|
||||
w = w.replace(re,"");
|
||||
}
|
||||
else if (re4.test(w))
|
||||
w = w + "e";
|
||||
}
|
||||
}
|
||||
|
||||
// Step 1c
|
||||
re = /^(.+?)y$/;
|
||||
if (re.test(w)) {
|
||||
var fp = re.exec(w);
|
||||
stem = fp[1];
|
||||
re = new RegExp(s_v);
|
||||
if (re.test(stem))
|
||||
w = stem + "i";
|
||||
}
|
||||
|
||||
// Step 2
|
||||
re = /^(.+?)(ational|tional|enci|anci|izer|bli|alli|entli|eli|ousli|ization|ation|ator|alism|iveness|fulness|ousness|aliti|iviti|biliti|logi)$/;
|
||||
if (re.test(w)) {
|
||||
var fp = re.exec(w);
|
||||
stem = fp[1];
|
||||
suffix = fp[2];
|
||||
re = new RegExp(mgr0);
|
||||
if (re.test(stem))
|
||||
w = stem + step2list[suffix];
|
||||
}
|
||||
|
||||
// Step 3
|
||||
re = /^(.+?)(icate|ative|alize|iciti|ical|ful|ness)$/;
|
||||
if (re.test(w)) {
|
||||
var fp = re.exec(w);
|
||||
stem = fp[1];
|
||||
suffix = fp[2];
|
||||
re = new RegExp(mgr0);
|
||||
if (re.test(stem))
|
||||
w = stem + step3list[suffix];
|
||||
}
|
||||
|
||||
// Step 4
|
||||
re = /^(.+?)(al|ance|ence|er|ic|able|ible|ant|ement|ment|ent|ou|ism|ate|iti|ous|ive|ize)$/;
|
||||
re2 = /^(.+?)(s|t)(ion)$/;
|
||||
if (re.test(w)) {
|
||||
var fp = re.exec(w);
|
||||
stem = fp[1];
|
||||
re = new RegExp(mgr1);
|
||||
if (re.test(stem))
|
||||
w = stem;
|
||||
}
|
||||
else if (re2.test(w)) {
|
||||
var fp = re2.exec(w);
|
||||
stem = fp[1] + fp[2];
|
||||
re2 = new RegExp(mgr1);
|
||||
if (re2.test(stem))
|
||||
w = stem;
|
||||
}
|
||||
|
||||
// Step 5
|
||||
re = /^(.+?)e$/;
|
||||
if (re.test(w)) {
|
||||
var fp = re.exec(w);
|
||||
stem = fp[1];
|
||||
re = new RegExp(mgr1);
|
||||
re2 = new RegExp(meq1);
|
||||
re3 = new RegExp("^" + C + v + "[^aeiouwxy]$");
|
||||
if (re.test(stem) || (re2.test(stem) && !(re3.test(stem))))
|
||||
w = stem;
|
||||
}
|
||||
re = /ll$/;
|
||||
re2 = new RegExp(mgr1);
|
||||
if (re.test(w) && re2.test(w)) {
|
||||
re = /.$/;
|
||||
w = w.replace(re,"");
|
||||
}
|
||||
|
||||
// and turn initial Y back to y
|
||||
if (firstch == "y")
|
||||
w = firstch.toLowerCase() + w.substr(1);
|
||||
return w;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
|
||||
/**
|
||||
* Simple result scoring code.
|
||||
*/
|
||||
var Scorer = {
|
||||
// Implement the following function to further tweak the score for each result
|
||||
// The function takes a result array [filename, title, anchor, descr, score]
|
||||
// and returns the new score.
|
||||
/*
|
||||
score: function(result) {
|
||||
return result[4];
|
||||
},
|
||||
*/
|
||||
|
||||
// query matches the full name of an object
|
||||
objNameMatch: 11,
|
||||
// or matches in the last dotted part of the object name
|
||||
objPartialMatch: 6,
|
||||
// Additive scores depending on the priority of the object
|
||||
objPrio: {0: 15, // used to be importantResults
|
||||
1: 5, // used to be objectResults
|
||||
2: -5}, // used to be unimportantResults
|
||||
// Used when the priority is not in the mapping.
|
||||
objPrioDefault: 0,
|
||||
|
||||
// query found in title
|
||||
title: 15,
|
||||
// query found in terms
|
||||
term: 5
|
||||
};
|
||||
|
||||
|
||||
/**
|
||||
* Search Module
|
||||
*/
|
||||
var Search = {
|
||||
|
||||
_index : null,
|
||||
_queued_query : null,
|
||||
_pulse_status : -1,
|
||||
|
||||
init : function() {
|
||||
var params = $.getQueryParameters();
|
||||
if (params.q) {
|
||||
var query = params.q[0];
|
||||
$('input[name="q"]')[0].value = query;
|
||||
this.performSearch(query);
|
||||
}
|
||||
},
|
||||
|
||||
loadIndex : function(url) {
|
||||
$.ajax({type: "GET", url: url, data: null,
|
||||
dataType: "script", cache: true,
|
||||
complete: function(jqxhr, textstatus) {
|
||||
if (textstatus != "success") {
|
||||
document.getElementById("searchindexloader").src = url;
|
||||
}
|
||||
}});
|
||||
},
|
||||
|
||||
setIndex : function(index) {
|
||||
var q;
|
||||
this._index = index;
|
||||
if ((q = this._queued_query) !== null) {
|
||||
this._queued_query = null;
|
||||
Search.query(q);
|
||||
}
|
||||
},
|
||||
|
||||
hasIndex : function() {
|
||||
return this._index !== null;
|
||||
},
|
||||
|
||||
deferQuery : function(query) {
|
||||
this._queued_query = query;
|
||||
},
|
||||
|
||||
stopPulse : function() {
|
||||
this._pulse_status = 0;
|
||||
},
|
||||
|
||||
startPulse : function() {
|
||||
if (this._pulse_status >= 0)
|
||||
return;
|
||||
function pulse() {
|
||||
var i;
|
||||
Search._pulse_status = (Search._pulse_status + 1) % 4;
|
||||
var dotString = '';
|
||||
for (i = 0; i < Search._pulse_status; i++)
|
||||
dotString += '.';
|
||||
Search.dots.text(dotString);
|
||||
if (Search._pulse_status > -1)
|
||||
window.setTimeout(pulse, 500);
|
||||
}
|
||||
pulse();
|
||||
},
|
||||
|
||||
/**
|
||||
* perform a search for something (or wait until index is loaded)
|
||||
*/
|
||||
performSearch : function(query) {
|
||||
// create the required interface elements
|
||||
this.out = $('#search-results');
|
||||
this.title = $('<h2>' + _('Searching') + '</h2>').appendTo(this.out);
|
||||
this.dots = $('<span></span>').appendTo(this.title);
|
||||
this.status = $('<p style="display: none"></p>').appendTo(this.out);
|
||||
this.output = $('<ul class="search"/>').appendTo(this.out);
|
||||
|
||||
$('#search-progress').text(_('Preparing search...'));
|
||||
this.startPulse();
|
||||
|
||||
// index already loaded, the browser was quick!
|
||||
if (this.hasIndex())
|
||||
this.query(query);
|
||||
else
|
||||
this.deferQuery(query);
|
||||
},
|
||||
|
||||
/**
|
||||
* execute search (requires search index to be loaded)
|
||||
*/
|
||||
query : function(query) {
|
||||
var i;
|
||||
var stopwords = ["a","and","are","as","at","be","but","by","for","if","in","into","is","it","near","no","not","of","on","or","such","that","the","their","then","there","these","they","this","to","was","will","with"];
|
||||
|
||||
// stem the searchterms and add them to the correct list
|
||||
var stemmer = new Stemmer();
|
||||
var searchterms = [];
|
||||
var excluded = [];
|
||||
var hlterms = [];
|
||||
var tmp = query.split(/\s+/);
|
||||
var objectterms = [];
|
||||
for (i = 0; i < tmp.length; i++) {
|
||||
if (tmp[i] !== "") {
|
||||
objectterms.push(tmp[i].toLowerCase());
|
||||
}
|
||||
|
||||
if ($u.indexOf(stopwords, tmp[i].toLowerCase()) != -1 || tmp[i].match(/^\d+$/) ||
|
||||
tmp[i] === "") {
|
||||
// skip this "word"
|
||||
continue;
|
||||
}
|
||||
// stem the word
|
||||
var word = stemmer.stemWord(tmp[i].toLowerCase());
|
||||
var toAppend;
|
||||
// select the correct list
|
||||
if (word[0] == '-') {
|
||||
toAppend = excluded;
|
||||
word = word.substr(1);
|
||||
}
|
||||
else {
|
||||
toAppend = searchterms;
|
||||
hlterms.push(tmp[i].toLowerCase());
|
||||
}
|
||||
// only add if not already in the list
|
||||
if (!$u.contains(toAppend, word))
|
||||
toAppend.push(word);
|
||||
}
|
||||
var highlightstring = '?highlight=' + $.urlencode(hlterms.join(" "));
|
||||
|
||||
// console.debug('SEARCH: searching for:');
|
||||
// console.info('required: ', searchterms);
|
||||
// console.info('excluded: ', excluded);
|
||||
|
||||
// prepare search
|
||||
var terms = this._index.terms;
|
||||
var titleterms = this._index.titleterms;
|
||||
|
||||
// array of [filename, title, anchor, descr, score]
|
||||
var results = [];
|
||||
$('#search-progress').empty();
|
||||
|
||||
// lookup as object
|
||||
for (i = 0; i < objectterms.length; i++) {
|
||||
var others = [].concat(objectterms.slice(0, i),
|
||||
objectterms.slice(i+1, objectterms.length));
|
||||
results = results.concat(this.performObjectSearch(objectterms[i], others));
|
||||
}
|
||||
|
||||
// lookup as search terms in fulltext
|
||||
results = results.concat(this.performTermsSearch(searchterms, excluded, terms, titleterms));
|
||||
|
||||
// let the scorer override scores with a custom scoring function
|
||||
if (Scorer.score) {
|
||||
for (i = 0; i < results.length; i++)
|
||||
results[i][4] = Scorer.score(results[i]);
|
||||
}
|
||||
|
||||
// now sort the results by score (in opposite order of appearance, since the
|
||||
// display function below uses pop() to retrieve items) and then
|
||||
// alphabetically
|
||||
results.sort(function(a, b) {
|
||||
var left = a[4];
|
||||
var right = b[4];
|
||||
if (left > right) {
|
||||
return 1;
|
||||
} else if (left < right) {
|
||||
return -1;
|
||||
} else {
|
||||
// same score: sort alphabetically
|
||||
left = a[1].toLowerCase();
|
||||
right = b[1].toLowerCase();
|
||||
return (left > right) ? -1 : ((left < right) ? 1 : 0);
|
||||
}
|
||||
});
|
||||
|
||||
// for debugging
|
||||
//Search.lastresults = results.slice(); // a copy
|
||||
//console.info('search results:', Search.lastresults);
|
||||
|
||||
// print the results
|
||||
var resultCount = results.length;
|
||||
function displayNextItem() {
|
||||
// results left, load the summary and display it
|
||||
if (results.length) {
|
||||
var item = results.pop();
|
||||
var listItem = $('<li style="display:none"></li>');
|
||||
if (DOCUMENTATION_OPTIONS.FILE_SUFFIX === '') {
|
||||
// dirhtml builder
|
||||
var dirname = item[0] + '/';
|
||||
if (dirname.match(/\/index\/$/)) {
|
||||
dirname = dirname.substring(0, dirname.length-6);
|
||||
} else if (dirname == 'index/') {
|
||||
dirname = '';
|
||||
}
|
||||
listItem.append($('<a/>').attr('href',
|
||||
DOCUMENTATION_OPTIONS.URL_ROOT + dirname +
|
||||
highlightstring + item[2]).html(item[1]));
|
||||
} else {
|
||||
// normal html builders
|
||||
listItem.append($('<a/>').attr('href',
|
||||
item[0] + DOCUMENTATION_OPTIONS.FILE_SUFFIX +
|
||||
highlightstring + item[2]).html(item[1]));
|
||||
}
|
||||
if (item[3]) {
|
||||
listItem.append($('<span> (' + item[3] + ')</span>'));
|
||||
Search.output.append(listItem);
|
||||
listItem.slideDown(5, function() {
|
||||
displayNextItem();
|
||||
});
|
||||
} else if (DOCUMENTATION_OPTIONS.HAS_SOURCE) {
|
||||
$.ajax({url: DOCUMENTATION_OPTIONS.URL_ROOT + '_sources/' + item[0] + '.txt',
|
||||
dataType: "text",
|
||||
complete: function(jqxhr, textstatus) {
|
||||
var data = jqxhr.responseText;
|
||||
if (data !== '' && data !== undefined) {
|
||||
listItem.append(Search.makeSearchSummary(data, searchterms, hlterms));
|
||||
}
|
||||
Search.output.append(listItem);
|
||||
listItem.slideDown(5, function() {
|
||||
displayNextItem();
|
||||
});
|
||||
}});
|
||||
} else {
|
||||
// no source available, just display title
|
||||
Search.output.append(listItem);
|
||||
listItem.slideDown(5, function() {
|
||||
displayNextItem();
|
||||
});
|
||||
}
|
||||
}
|
||||
// search finished, update title and status message
|
||||
else {
|
||||
Search.stopPulse();
|
||||
Search.title.text(_('Search Results'));
|
||||
if (!resultCount)
|
||||
Search.status.text(_('Your search did not match any documents. Please make sure that all words are spelled correctly and that you\'ve selected enough categories.'));
|
||||
else
|
||||
Search.status.text(_('Search finished, found %s page(s) matching the search query.').replace('%s', resultCount));
|
||||
Search.status.fadeIn(500);
|
||||
}
|
||||
}
|
||||
displayNextItem();
|
||||
},
|
||||
|
||||
/**
|
||||
* search for object names
|
||||
*/
|
||||
performObjectSearch : function(object, otherterms) {
|
||||
var filenames = this._index.filenames;
|
||||
var objects = this._index.objects;
|
||||
var objnames = this._index.objnames;
|
||||
var titles = this._index.titles;
|
||||
|
||||
var i;
|
||||
var results = [];
|
||||
|
||||
for (var prefix in objects) {
|
||||
for (var name in objects[prefix]) {
|
||||
var fullname = (prefix ? prefix + '.' : '') + name;
|
||||
if (fullname.toLowerCase().indexOf(object) > -1) {
|
||||
var score = 0;
|
||||
var parts = fullname.split('.');
|
||||
// check for different match types: exact matches of full name or
|
||||
// "last name" (i.e. last dotted part)
|
||||
if (fullname == object || parts[parts.length - 1] == object) {
|
||||
score += Scorer.objNameMatch;
|
||||
// matches in last name
|
||||
} else if (parts[parts.length - 1].indexOf(object) > -1) {
|
||||
score += Scorer.objPartialMatch;
|
||||
}
|
||||
var match = objects[prefix][name];
|
||||
var objname = objnames[match[1]][2];
|
||||
var title = titles[match[0]];
|
||||
// If more than one term searched for, we require other words to be
|
||||
// found in the name/title/description
|
||||
if (otherterms.length > 0) {
|
||||
var haystack = (prefix + ' ' + name + ' ' +
|
||||
objname + ' ' + title).toLowerCase();
|
||||
var allfound = true;
|
||||
for (i = 0; i < otherterms.length; i++) {
|
||||
if (haystack.indexOf(otherterms[i]) == -1) {
|
||||
allfound = false;
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (!allfound) {
|
||||
continue;
|
||||
}
|
||||
}
|
||||
var descr = objname + _(', in ') + title;
|
||||
|
||||
var anchor = match[3];
|
||||
if (anchor === '')
|
||||
anchor = fullname;
|
||||
else if (anchor == '-')
|
||||
anchor = objnames[match[1]][1] + '-' + fullname;
|
||||
// add custom score for some objects according to scorer
|
||||
if (Scorer.objPrio.hasOwnProperty(match[2])) {
|
||||
score += Scorer.objPrio[match[2]];
|
||||
} else {
|
||||
score += Scorer.objPrioDefault;
|
||||
}
|
||||
results.push([filenames[match[0]], fullname, '#'+anchor, descr, score]);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return results;
|
||||
},
|
||||
|
||||
/**
|
||||
* search for full-text terms in the index
|
||||
*/
|
||||
performTermsSearch : function(searchterms, excluded, terms, titleterms) {
|
||||
var filenames = this._index.filenames;
|
||||
var titles = this._index.titles;
|
||||
|
||||
var i, j, file;
|
||||
var fileMap = {};
|
||||
var scoreMap = {};
|
||||
var results = [];
|
||||
|
||||
// perform the search on the required terms
|
||||
for (i = 0; i < searchterms.length; i++) {
|
||||
var word = searchterms[i];
|
||||
var files = [];
|
||||
var _o = [
|
||||
{files: terms[word], score: Scorer.term},
|
||||
{files: titleterms[word], score: Scorer.title}
|
||||
];
|
||||
|
||||
// no match but word was a required one
|
||||
if ($u.every(_o, function(o){return o.files === undefined;})) {
|
||||
break;
|
||||
}
|
||||
// found search word in contents
|
||||
$u.each(_o, function(o) {
|
||||
var _files = o.files;
|
||||
if (_files === undefined)
|
||||
return
|
||||
|
||||
if (_files.length === undefined)
|
||||
_files = [_files];
|
||||
files = files.concat(_files);
|
||||
|
||||
// set score for the word in each file to Scorer.term
|
||||
for (j = 0; j < _files.length; j++) {
|
||||
file = _files[j];
|
||||
if (!(file in scoreMap))
|
||||
scoreMap[file] = {}
|
||||
scoreMap[file][word] = o.score;
|
||||
}
|
||||
});
|
||||
|
||||
// create the mapping
|
||||
for (j = 0; j < files.length; j++) {
|
||||
file = files[j];
|
||||
if (file in fileMap)
|
||||
fileMap[file].push(word);
|
||||
else
|
||||
fileMap[file] = [word];
|
||||
}
|
||||
}
|
||||
|
||||
// now check if the files don't contain excluded terms
|
||||
for (file in fileMap) {
|
||||
var valid = true;
|
||||
|
||||
// check if all requirements are matched
|
||||
if (fileMap[file].length != searchterms.length)
|
||||
continue;
|
||||
|
||||
// ensure that none of the excluded terms is in the search result
|
||||
for (i = 0; i < excluded.length; i++) {
|
||||
if (terms[excluded[i]] == file ||
|
||||
titleterms[excluded[i]] == file ||
|
||||
$u.contains(terms[excluded[i]] || [], file) ||
|
||||
$u.contains(titleterms[excluded[i]] || [], file)) {
|
||||
valid = false;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
// if we have still a valid result we can add it to the result list
|
||||
if (valid) {
|
||||
// select one (max) score for the file.
|
||||
// for better ranking, we should calculate ranking by using words statistics like basic tf-idf...
|
||||
var score = $u.max($u.map(fileMap[file], function(w){return scoreMap[file][w]}));
|
||||
results.push([filenames[file], titles[file], '', null, score]);
|
||||
}
|
||||
}
|
||||
return results;
|
||||
},
|
||||
|
||||
/**
|
||||
* helper function to return a node containing the
|
||||
* search summary for a given text. keywords is a list
|
||||
* of stemmed words, hlwords is the list of normal, unstemmed
|
||||
* words. the first one is used to find the occurance, the
|
||||
* latter for highlighting it.
|
||||
*/
|
||||
makeSearchSummary : function(text, keywords, hlwords) {
|
||||
var textLower = text.toLowerCase();
|
||||
var start = 0;
|
||||
$.each(keywords, function() {
|
||||
var i = textLower.indexOf(this.toLowerCase());
|
||||
if (i > -1)
|
||||
start = i;
|
||||
});
|
||||
start = Math.max(start - 120, 0);
|
||||
var excerpt = ((start > 0) ? '...' : '') +
|
||||
$.trim(text.substr(start, 240)) +
|
||||
((start + 240 - text.length) ? '...' : '');
|
||||
var rv = $('<div class="context"></div>').text(excerpt);
|
||||
$.each(hlwords, function() {
|
||||
rv = rv.highlightText(this, 'highlighted');
|
||||
});
|
||||
return rv;
|
||||
}
|
||||
};
|
||||
|
||||
$(document).ready(function() {
|
||||
Search.init();
|
||||
});
|
||||
159  doc/build/html/_static/sidebar.js  vendored  Normal file
@@ -0,0 +1,159 @@
/*
|
||||
* sidebar.js
|
||||
* ~~~~~~~~~~
|
||||
*
|
||||
* This script makes the Sphinx sidebar collapsible.
|
||||
*
|
||||
* .sphinxsidebar contains .sphinxsidebarwrapper. This script adds
|
||||
 * in .sphinxsidebar, after .sphinxsidebarwrapper, the #sidebarbutton
|
||||
* used to collapse and expand the sidebar.
|
||||
*
|
||||
* When the sidebar is collapsed the .sphinxsidebarwrapper is hidden
|
||||
* and the width of the sidebar and the margin-left of the document
|
||||
* are decreased. When the sidebar is expanded the opposite happens.
|
||||
* This script saves a per-browser/per-session cookie used to
|
||||
* remember the position of the sidebar among the pages.
|
||||
* Once the browser is closed the cookie is deleted and the position
|
||||
* reset to the default (expanded).
|
||||
*
|
||||
* :copyright: Copyright 2007-2016 by the Sphinx team, see AUTHORS.
|
||||
* :license: BSD, see LICENSE for details.
|
||||
*
|
||||
*/
|
||||
|
||||
$(function() {
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
// global elements used by the functions.
|
||||
// the 'sidebarbutton' element is defined as global after its
|
||||
// creation, in the add_sidebar_button function
|
||||
var bodywrapper = $('.bodywrapper');
|
||||
var sidebar = $('.sphinxsidebar');
|
||||
var sidebarwrapper = $('.sphinxsidebarwrapper');
|
||||
|
||||
// for some reason, the document has no sidebar; do not run into errors
|
||||
if (!sidebar.length) return;
|
||||
|
||||
// original margin-left of the bodywrapper and width of the sidebar
|
||||
// with the sidebar expanded
|
||||
var bw_margin_expanded = bodywrapper.css('margin-left');
|
||||
var ssb_width_expanded = sidebar.width();
|
||||
|
||||
// margin-left of the bodywrapper and width of the sidebar
|
||||
// with the sidebar collapsed
|
||||
var bw_margin_collapsed = '.8em';
|
||||
var ssb_width_collapsed = '.8em';
|
||||
|
||||
// colors used by the current theme
|
||||
var dark_color = $('.related').css('background-color');
|
||||
var light_color = $('.document').css('background-color');
|
||||
|
||||
function sidebar_is_collapsed() {
|
||||
return sidebarwrapper.is(':not(:visible)');
|
||||
}
|
||||
|
||||
function toggle_sidebar() {
|
||||
if (sidebar_is_collapsed())
|
||||
expand_sidebar();
|
||||
else
|
||||
collapse_sidebar();
|
||||
}
|
||||
|
||||
function collapse_sidebar() {
|
||||
sidebarwrapper.hide();
|
||||
sidebar.css('width', ssb_width_collapsed);
|
||||
bodywrapper.css('margin-left', bw_margin_collapsed);
|
||||
sidebarbutton.css({
|
||||
'margin-left': '0',
|
||||
'height': bodywrapper.height()
|
||||
});
|
||||
sidebarbutton.find('span').text('»');
|
||||
sidebarbutton.attr('title', _('Expand sidebar'));
|
||||
document.cookie = 'sidebar=collapsed';
|
||||
}
|
||||
|
||||
function expand_sidebar() {
|
||||
bodywrapper.css('margin-left', bw_margin_expanded);
|
||||
sidebar.css('width', ssb_width_expanded);
|
||||
sidebarwrapper.show();
|
||||
sidebarbutton.css({
|
||||
'margin-left': ssb_width_expanded-12,
|
||||
'height': bodywrapper.height()
|
||||
});
|
||||
sidebarbutton.find('span').text('«');
|
||||
sidebarbutton.attr('title', _('Collapse sidebar'));
|
||||
document.cookie = 'sidebar=expanded';
|
||||
}
|
||||
|
||||
function add_sidebar_button() {
|
||||
sidebarwrapper.css({
|
||||
'float': 'left',
|
||||
'margin-right': '0',
|
||||
'width': ssb_width_expanded - 28
|
||||
});
|
||||
// create the button
|
||||
sidebar.append(
|
||||
'<div id="sidebarbutton"><span>«</span></div>'
|
||||
);
|
||||
var sidebarbutton = $('#sidebarbutton');
|
||||
light_color = sidebarbutton.css('background-color');
|
||||
// find the height of the viewport to center the '<<' in the page
|
||||
var viewport_height;
|
||||
if (window.innerHeight)
|
||||
viewport_height = window.innerHeight;
|
||||
else
|
||||
viewport_height = $(window).height();
|
||||
sidebarbutton.find('span').css({
|
||||
'display': 'block',
|
||||
'margin-top': (viewport_height - sidebar.position().top - 20) / 2
|
||||
});
|
||||
|
||||
sidebarbutton.click(toggle_sidebar);
|
||||
sidebarbutton.attr('title', _('Collapse sidebar'));
|
||||
sidebarbutton.css({
|
||||
'color': '#FFFFFF',
|
||||
'border-left': '1px solid ' + dark_color,
|
||||
'font-size': '1.2em',
|
||||
'cursor': 'pointer',
|
||||
'height': bodywrapper.height(),
|
||||
'padding-top': '1px',
|
||||
'margin-left': ssb_width_expanded - 12
|
||||
});
|
||||
|
||||
sidebarbutton.hover(
|
||||
function () {
|
||||
$(this).css('background-color', dark_color);
|
||||
},
|
||||
function () {
|
||||
$(this).css('background-color', light_color);
|
||||
}
|
||||
);
|
||||
}
|
||||
|
||||
function set_position_from_cookie() {
|
||||
if (!document.cookie)
|
||||
return;
|
||||
var items = document.cookie.split(';');
|
||||
for(var k=0; k<items.length; k++) {
|
||||
var key_val = items[k].split('=');
|
||||
var key = key_val[0].replace(/ /, ""); // strip leading spaces
|
||||
if (key == 'sidebar') {
|
||||
var value = key_val[1];
|
||||
if ((value == 'collapsed') && (!sidebar_is_collapsed()))
|
||||
collapse_sidebar();
|
||||
else if ((value == 'expanded') && (sidebar_is_collapsed()))
|
||||
expand_sidebar();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
add_sidebar_button();
|
||||
var sidebarbutton = $('#sidebarbutton');
|
||||
set_position_from_cookie();
|
||||
});
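// Illustrative note (not part of the original sidebar.js): the sidebar state is
// persisted in a session cookie named "sidebar" whose value is either
// "collapsed" or "expanded" (e.g. document.cookie contains "sidebar=expanded").
// set_position_from_cookie() re-reads that key on every page load, so the
// chosen state follows the reader across pages until the browser is closed.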
|
||||
BIN
doc/build/html/_static/up-pressed.png
vendored
Normal file
|
After Width: | Height: | Size: 345 B |
BIN
doc/build/html/_static/up.png
vendored
Normal file
|
After Width: | Height: | Size: 345 B |
808
doc/build/html/_static/websupport.js
vendored
Normal file
|
|
@ -0,0 +1,808 @@
|
|||
/*
|
||||
* websupport.js
|
||||
* ~~~~~~~~~~~~~
|
||||
*
|
||||
 * sphinx.websupport utilities for all documentation.
|
||||
*
|
||||
* :copyright: Copyright 2007-2016 by the Sphinx team, see AUTHORS.
|
||||
* :license: BSD, see LICENSE for details.
|
||||
*
|
||||
*/
|
||||
|
||||
(function($) {
|
||||
$.fn.autogrow = function() {
|
||||
return this.each(function() {
|
||||
var textarea = this;
|
||||
|
||||
$.fn.autogrow.resize(textarea);
|
||||
|
||||
$(textarea)
|
||||
.focus(function() {
|
||||
textarea.interval = setInterval(function() {
|
||||
$.fn.autogrow.resize(textarea);
|
||||
}, 500);
|
||||
})
|
||||
.blur(function() {
|
||||
clearInterval(textarea.interval);
|
||||
});
|
||||
});
|
||||
};
|
||||
|
||||
$.fn.autogrow.resize = function(textarea) {
|
||||
var lineHeight = parseInt($(textarea).css('line-height'), 10);
|
||||
var lines = textarea.value.split('\n');
|
||||
var columns = textarea.cols;
|
||||
var lineCount = 0;
|
||||
$.each(lines, function() {
|
||||
lineCount += Math.ceil(this.length / columns) || 1;
|
||||
});
|
||||
var height = lineHeight * (lineCount + 1);
|
||||
$(textarea).css('height', height);
|
||||
};
|
||||
})(jQuery);
|
||||
|
||||
(function($) {
|
||||
var comp, by;
|
||||
|
||||
function init() {
|
||||
initEvents();
|
||||
initComparator();
|
||||
}
|
||||
|
||||
function initEvents() {
|
||||
$(document).on("click", 'a.comment-close', function(event) {
|
||||
event.preventDefault();
|
||||
hide($(this).attr('id').substring(2));
|
||||
});
|
||||
$(document).on("click", 'a.vote', function(event) {
|
||||
event.preventDefault();
|
||||
handleVote($(this));
|
||||
});
|
||||
$(document).on("click", 'a.reply', function(event) {
|
||||
event.preventDefault();
|
||||
openReply($(this).attr('id').substring(2));
|
||||
});
|
||||
$(document).on("click", 'a.close-reply', function(event) {
|
||||
event.preventDefault();
|
||||
closeReply($(this).attr('id').substring(2));
|
||||
});
|
||||
$(document).on("click", 'a.sort-option', function(event) {
|
||||
event.preventDefault();
|
||||
handleReSort($(this));
|
||||
});
|
||||
$(document).on("click", 'a.show-proposal', function(event) {
|
||||
event.preventDefault();
|
||||
showProposal($(this).attr('id').substring(2));
|
||||
});
|
||||
$(document).on("click", 'a.hide-proposal', function(event) {
|
||||
event.preventDefault();
|
||||
hideProposal($(this).attr('id').substring(2));
|
||||
});
|
||||
$(document).on("click", 'a.show-propose-change', function(event) {
|
||||
event.preventDefault();
|
||||
showProposeChange($(this).attr('id').substring(2));
|
||||
});
|
||||
$(document).on("click", 'a.hide-propose-change', function(event) {
|
||||
event.preventDefault();
|
||||
hideProposeChange($(this).attr('id').substring(2));
|
||||
});
|
||||
$(document).on("click", 'a.accept-comment', function(event) {
|
||||
event.preventDefault();
|
||||
acceptComment($(this).attr('id').substring(2));
|
||||
});
|
||||
$(document).on("click", 'a.delete-comment', function(event) {
|
||||
event.preventDefault();
|
||||
deleteComment($(this).attr('id').substring(2));
|
||||
});
|
||||
$(document).on("click", 'a.comment-markup', function(event) {
|
||||
event.preventDefault();
|
||||
toggleCommentMarkupBox($(this).attr('id').substring(2));
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Set comp, which is a comparator function used for sorting and
|
||||
* inserting comments into the list.
|
||||
*/
|
||||
function setComparator() {
|
||||
// If the first three letters are "asc", sort in ascending order
|
||||
// and remove the prefix.
|
||||
if (by.substring(0,3) == 'asc') {
|
||||
var i = by.substring(3);
|
||||
comp = function(a, b) { return a[i] - b[i]; };
|
||||
} else {
|
||||
// Otherwise sort in descending order.
|
||||
comp = function(a, b) { return b[by] - a[by]; };
|
||||
}
|
||||
|
||||
// Reset link styles and format the selected sort option.
|
||||
$('a.sel').attr('href', '#').removeClass('sel');
|
||||
$('a.by' + by).removeAttr('href').addClass('sel');
|
||||
}
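  // Illustrative note (assumption, not part of the original file): "by" names
  // the comment field to sort on, optionally prefixed with "asc". For example
  // by = 'rating' sorts comments descending by rating, while by = 'ascage'
  // sorts ascending by age; the matching "a.byrating" / "a.byascage" link is
  // then marked with the "sel" class.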
|
||||
|
||||
/**
|
||||
* Create a comp function. If the user has preferences stored in
|
||||
* the sortBy cookie, use those, otherwise use the default.
|
||||
*/
|
||||
function initComparator() {
|
||||
by = 'rating'; // Default to sort by rating.
|
||||
// If the sortBy cookie is set, use that instead.
|
||||
if (document.cookie.length > 0) {
|
||||
var start = document.cookie.indexOf('sortBy=');
|
||||
if (start != -1) {
|
||||
start = start + 7;
|
||||
var end = document.cookie.indexOf(";", start);
|
||||
if (end == -1) {
|
||||
end = document.cookie.length;
|
||||
by = unescape(document.cookie.substring(start, end));
|
||||
}
|
||||
}
|
||||
}
|
||||
setComparator();
|
||||
}
|
||||
|
||||
/**
|
||||
* Show a comment div.
|
||||
*/
|
||||
function show(id) {
|
||||
$('#ao' + id).hide();
|
||||
$('#ah' + id).show();
|
||||
var context = $.extend({id: id}, opts);
|
||||
var popup = $(renderTemplate(popupTemplate, context)).hide();
|
||||
popup.find('textarea[name="proposal"]').hide();
|
||||
popup.find('a.by' + by).addClass('sel');
|
||||
var form = popup.find('#cf' + id);
|
||||
form.submit(function(event) {
|
||||
event.preventDefault();
|
||||
addComment(form);
|
||||
});
|
||||
$('#s' + id).after(popup);
|
||||
popup.slideDown('fast', function() {
|
||||
getComments(id);
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Hide a comment div.
|
||||
*/
|
||||
function hide(id) {
|
||||
$('#ah' + id).hide();
|
||||
$('#ao' + id).show();
|
||||
var div = $('#sc' + id);
|
||||
div.slideUp('fast', function() {
|
||||
div.remove();
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Perform an ajax request to get comments for a node
|
||||
* and insert the comments into the comments tree.
|
||||
*/
|
||||
function getComments(id) {
|
||||
$.ajax({
|
||||
type: 'GET',
|
||||
url: opts.getCommentsURL,
|
||||
data: {node: id},
|
||||
success: function(data, textStatus, request) {
|
||||
var ul = $('#cl' + id);
|
||||
var speed = 100;
|
||||
$('#cf' + id)
|
||||
.find('textarea[name="proposal"]')
|
||||
.data('source', data.source);
|
||||
|
||||
if (data.comments.length === 0) {
|
||||
ul.html('<li>No comments yet.</li>');
|
||||
ul.data('empty', true);
|
||||
} else {
|
||||
// If there are comments, sort them and put them in the list.
|
||||
var comments = sortComments(data.comments);
|
||||
speed = data.comments.length * 100;
|
||||
appendComments(comments, ul);
|
||||
ul.data('empty', false);
|
||||
}
|
||||
$('#cn' + id).slideUp(speed + 200);
|
||||
ul.slideDown(speed);
|
||||
},
|
||||
error: function(request, textStatus, error) {
|
||||
showError('Oops, there was a problem retrieving the comments.');
|
||||
},
|
||||
dataType: 'json'
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Add a comment via ajax and insert the comment into the comment tree.
|
||||
*/
|
||||
function addComment(form) {
|
||||
var node_id = form.find('input[name="node"]').val();
|
||||
var parent_id = form.find('input[name="parent"]').val();
|
||||
var text = form.find('textarea[name="comment"]').val();
|
||||
var proposal = form.find('textarea[name="proposal"]').val();
|
||||
|
||||
if (text == '') {
|
||||
showError('Please enter a comment.');
|
||||
return;
|
||||
}
|
||||
|
||||
// Disable the form that is being submitted.
|
||||
form.find('textarea,input').attr('disabled', 'disabled');
|
||||
|
||||
// Send the comment to the server.
|
||||
$.ajax({
|
||||
type: "POST",
|
||||
url: opts.addCommentURL,
|
||||
dataType: 'json',
|
||||
data: {
|
||||
node: node_id,
|
||||
parent: parent_id,
|
||||
text: text,
|
||||
proposal: proposal
|
||||
},
|
||||
success: function(data, textStatus, error) {
|
||||
// Reset the form.
|
||||
if (node_id) {
|
||||
hideProposeChange(node_id);
|
||||
}
|
||||
form.find('textarea')
|
||||
.val('')
|
||||
.add(form.find('input'))
|
||||
.removeAttr('disabled');
|
||||
var ul = $('#cl' + (node_id || parent_id));
|
||||
if (ul.data('empty')) {
|
||||
$(ul).empty();
|
||||
ul.data('empty', false);
|
||||
}
|
||||
insertComment(data.comment);
|
||||
var ao = $('#ao' + node_id);
|
||||
ao.find('img').attr({'src': opts.commentBrightImage});
|
||||
if (node_id) {
|
||||
// if this was a "root" comment, remove the commenting box
|
||||
// (the user can get it back by reopening the comment popup)
|
||||
$('#ca' + node_id).slideUp();
|
||||
}
|
||||
},
|
||||
error: function(request, textStatus, error) {
|
||||
form.find('textarea,input').removeAttr('disabled');
|
||||
showError('Oops, there was a problem adding the comment.');
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Recursively append comments to the main comment list and children
|
||||
* lists, creating the comment tree.
|
||||
*/
|
||||
function appendComments(comments, ul) {
|
||||
$.each(comments, function() {
|
||||
var div = createCommentDiv(this);
|
||||
ul.append($(document.createElement('li')).html(div));
|
||||
appendComments(this.children, div.find('ul.comment-children'));
|
||||
      // To avoid stale data, don't store the comment's children in data.
|
||||
this.children = null;
|
||||
div.data('comment', this);
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* After adding a new comment, it must be inserted in the correct
|
||||
* location in the comment tree.
|
||||
*/
|
||||
function insertComment(comment) {
|
||||
var div = createCommentDiv(comment);
|
||||
|
||||
    // To avoid stale data, don't store the comment's children in data.
|
||||
comment.children = null;
|
||||
div.data('comment', comment);
|
||||
|
||||
var ul = $('#cl' + (comment.node || comment.parent));
|
||||
var siblings = getChildren(ul);
|
||||
|
||||
var li = $(document.createElement('li'));
|
||||
li.hide();
|
||||
|
||||
    // Determine where in the parent's children list to insert this comment.
|
||||
    for (var i = 0; i < siblings.length; i++) {
|
||||
if (comp(comment, siblings[i]) <= 0) {
|
||||
$('#cd' + siblings[i].id)
|
||||
.parent()
|
||||
.before(li.html(div));
|
||||
li.slideDown('fast');
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
// If we get here, this comment rates lower than all the others,
|
||||
// or it is the only comment in the list.
|
||||
ul.append(li.html(div));
|
||||
li.slideDown('fast');
|
||||
}
|
||||
|
||||
function acceptComment(id) {
|
||||
$.ajax({
|
||||
type: 'POST',
|
||||
url: opts.acceptCommentURL,
|
||||
data: {id: id},
|
||||
success: function(data, textStatus, request) {
|
||||
$('#cm' + id).fadeOut('fast');
|
||||
$('#cd' + id).removeClass('moderate');
|
||||
},
|
||||
error: function(request, textStatus, error) {
|
||||
showError('Oops, there was a problem accepting the comment.');
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
function deleteComment(id) {
|
||||
$.ajax({
|
||||
type: 'POST',
|
||||
url: opts.deleteCommentURL,
|
||||
data: {id: id},
|
||||
success: function(data, textStatus, request) {
|
||||
var div = $('#cd' + id);
|
||||
if (data == 'delete') {
|
||||
// Moderator mode: remove the comment and all children immediately
|
||||
div.slideUp('fast', function() {
|
||||
div.remove();
|
||||
});
|
||||
return;
|
||||
}
|
||||
// User mode: only mark the comment as deleted
|
||||
div
|
||||
.find('span.user-id:first')
|
||||
.text('[deleted]').end()
|
||||
.find('div.comment-text:first')
|
||||
.text('[deleted]').end()
|
||||
.find('#cm' + id + ', #dc' + id + ', #ac' + id + ', #rc' + id +
|
||||
', #sp' + id + ', #hp' + id + ', #cr' + id + ', #rl' + id)
|
||||
.remove();
|
||||
var comment = div.data('comment');
|
||||
comment.username = '[deleted]';
|
||||
comment.text = '[deleted]';
|
||||
div.data('comment', comment);
|
||||
},
|
||||
error: function(request, textStatus, error) {
|
||||
showError('Oops, there was a problem deleting the comment.');
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
function showProposal(id) {
|
||||
$('#sp' + id).hide();
|
||||
$('#hp' + id).show();
|
||||
$('#pr' + id).slideDown('fast');
|
||||
}
|
||||
|
||||
function hideProposal(id) {
|
||||
$('#hp' + id).hide();
|
||||
$('#sp' + id).show();
|
||||
$('#pr' + id).slideUp('fast');
|
||||
}
|
||||
|
||||
function showProposeChange(id) {
|
||||
$('#pc' + id).hide();
|
||||
$('#hc' + id).show();
|
||||
var textarea = $('#pt' + id);
|
||||
textarea.val(textarea.data('source'));
|
||||
$.fn.autogrow.resize(textarea[0]);
|
||||
textarea.slideDown('fast');
|
||||
}
|
||||
|
||||
function hideProposeChange(id) {
|
||||
$('#hc' + id).hide();
|
||||
$('#pc' + id).show();
|
||||
var textarea = $('#pt' + id);
|
||||
textarea.val('').removeAttr('disabled');
|
||||
textarea.slideUp('fast');
|
||||
}
|
||||
|
||||
function toggleCommentMarkupBox(id) {
|
||||
$('#mb' + id).toggle();
|
||||
}
|
||||
|
||||
/** Handle when the user clicks on a sort by link. */
|
||||
function handleReSort(link) {
|
||||
var classes = link.attr('class').split(/\s+/);
|
||||
for (var i=0; i<classes.length; i++) {
|
||||
if (classes[i] != 'sort-option') {
|
||||
by = classes[i].substring(2);
|
||||
}
|
||||
}
|
||||
setComparator();
|
||||
// Save/update the sortBy cookie.
|
||||
var expiration = new Date();
|
||||
expiration.setDate(expiration.getDate() + 365);
|
||||
document.cookie= 'sortBy=' + escape(by) +
|
||||
';expires=' + expiration.toUTCString();
|
||||
$('ul.comment-ul').each(function(index, ul) {
|
||||
var comments = getChildren($(ul), true);
|
||||
comments = sortComments(comments);
|
||||
appendComments(comments, $(ul).empty());
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Function to process a vote when a user clicks an arrow.
|
||||
*/
|
||||
function handleVote(link) {
|
||||
if (!opts.voting) {
|
||||
showError("You'll need to login to vote.");
|
||||
return;
|
||||
}
|
||||
|
||||
var id = link.attr('id');
|
||||
if (!id) {
|
||||
// Didn't click on one of the voting arrows.
|
||||
return;
|
||||
}
|
||||
// If it is an unvote, the new vote value is 0,
|
||||
// Otherwise it's 1 for an upvote, or -1 for a downvote.
|
||||
var value = 0;
|
||||
if (id.charAt(1) != 'u') {
|
||||
value = id.charAt(0) == 'u' ? 1 : -1;
|
||||
}
|
||||
// The data to be sent to the server.
|
||||
var d = {
|
||||
comment_id: id.substring(2),
|
||||
value: value
|
||||
};
|
||||
|
||||
// Swap the vote and unvote links.
|
||||
link.hide();
|
||||
$('#' + id.charAt(0) + (id.charAt(1) == 'u' ? 'v' : 'u') + d.comment_id)
|
||||
.show();
|
||||
|
||||
// The div the comment is displayed in.
|
||||
var div = $('div#cd' + d.comment_id);
|
||||
var data = div.data('comment');
|
||||
|
||||
// If this is not an unvote, and the other vote arrow has
|
||||
// already been pressed, unpress it.
|
||||
if ((d.value !== 0) && (data.vote === d.value * -1)) {
|
||||
$('#' + (d.value == 1 ? 'd' : 'u') + 'u' + d.comment_id).hide();
|
||||
$('#' + (d.value == 1 ? 'd' : 'u') + 'v' + d.comment_id).show();
|
||||
}
|
||||
|
||||
    // Update the comment's rating in the local data.
|
||||
data.rating += (data.vote === 0) ? d.value : (d.value - data.vote);
|
||||
data.vote = d.value;
|
||||
div.data('comment', data);
|
||||
|
||||
// Change the rating text.
|
||||
div.find('.rating:first')
|
||||
.text(data.rating + ' point' + (data.rating == 1 ? '' : 's'));
|
||||
|
||||
// Send the vote information to the server.
|
||||
$.ajax({
|
||||
type: "POST",
|
||||
url: opts.processVoteURL,
|
||||
data: d,
|
||||
error: function(request, textStatus, error) {
|
||||
showError('Oops, there was a problem casting that vote.');
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Open a reply form used to reply to an existing comment.
|
||||
*/
|
||||
function openReply(id) {
|
||||
// Swap out the reply link for the hide link
|
||||
$('#rl' + id).hide();
|
||||
$('#cr' + id).show();
|
||||
|
||||
// Add the reply li to the children ul.
|
||||
var div = $(renderTemplate(replyTemplate, {id: id})).hide();
|
||||
$('#cl' + id)
|
||||
.prepend(div)
|
||||
// Setup the submit handler for the reply form.
|
||||
.find('#rf' + id)
|
||||
.submit(function(event) {
|
||||
event.preventDefault();
|
||||
addComment($('#rf' + id));
|
||||
closeReply(id);
|
||||
})
|
||||
.find('input[type=button]')
|
||||
.click(function() {
|
||||
closeReply(id);
|
||||
});
|
||||
div.slideDown('fast', function() {
|
||||
$('#rf' + id).find('textarea').focus();
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Close the reply form opened with openReply.
|
||||
*/
|
||||
function closeReply(id) {
|
||||
// Remove the reply div from the DOM.
|
||||
$('#rd' + id).slideUp('fast', function() {
|
||||
$(this).remove();
|
||||
});
|
||||
|
||||
// Swap out the hide link for the reply link
|
||||
$('#cr' + id).hide();
|
||||
$('#rl' + id).show();
|
||||
}
|
||||
|
||||
/**
|
||||
* Recursively sort a tree of comments using the comp comparator.
|
||||
*/
|
||||
function sortComments(comments) {
|
||||
comments.sort(comp);
|
||||
$.each(comments, function() {
|
||||
this.children = sortComments(this.children);
|
||||
});
|
||||
return comments;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the children comments from a ul. If recursive is true,
|
||||
* recursively include childrens' children.
|
||||
*/
|
||||
function getChildren(ul, recursive) {
|
||||
var children = [];
|
||||
ul.children().children("[id^='cd']")
|
||||
.each(function() {
|
||||
var comment = $(this).data('comment');
|
||||
if (recursive)
|
||||
comment.children = getChildren($(this).find('#cl' + comment.id), true);
|
||||
children.push(comment);
|
||||
});
|
||||
return children;
|
||||
}
|
||||
|
||||
/** Create a div to display a comment in. */
|
||||
function createCommentDiv(comment) {
|
||||
if (!comment.displayed && !opts.moderator) {
|
||||
return $('<div class="moderate">Thank you! Your comment will show up '
|
||||
        + 'once it has been approved by a moderator.</div>');
|
||||
}
|
||||
// Prettify the comment rating.
|
||||
comment.pretty_rating = comment.rating + ' point' +
|
||||
(comment.rating == 1 ? '' : 's');
|
||||
// Make a class (for displaying not yet moderated comments differently)
|
||||
comment.css_class = comment.displayed ? '' : ' moderate';
|
||||
// Create a div for this comment.
|
||||
var context = $.extend({}, opts, comment);
|
||||
var div = $(renderTemplate(commentTemplate, context));
|
||||
|
||||
// If the user has voted on this comment, highlight the correct arrow.
|
||||
if (comment.vote) {
|
||||
var direction = (comment.vote == 1) ? 'u' : 'd';
|
||||
div.find('#' + direction + 'v' + comment.id).hide();
|
||||
div.find('#' + direction + 'u' + comment.id).show();
|
||||
}
|
||||
|
||||
if (opts.moderator || comment.text != '[deleted]') {
|
||||
div.find('a.reply').show();
|
||||
if (comment.proposal_diff)
|
||||
div.find('#sp' + comment.id).show();
|
||||
if (opts.moderator && !comment.displayed)
|
||||
div.find('#cm' + comment.id).show();
|
||||
if (opts.moderator || (opts.username == comment.username))
|
||||
div.find('#dc' + comment.id).show();
|
||||
}
|
||||
return div;
|
||||
}
|
||||
|
||||
/**
|
||||
* A simple template renderer. Placeholders such as <%id%> are replaced
|
||||
* by context['id'] with items being escaped. Placeholders such as <#id#>
|
||||
* are not escaped.
|
||||
*/
|
||||
function renderTemplate(template, context) {
|
||||
var esc = $(document.createElement('div'));
|
||||
|
||||
function handle(ph, escape) {
|
||||
var cur = context;
|
||||
$.each(ph.split('.'), function() {
|
||||
cur = cur[this];
|
||||
});
|
||||
return escape ? esc.text(cur || "").html() : cur;
|
||||
}
|
||||
|
||||
return template.replace(/<([%#])([\w\.]*)\1>/g, function() {
|
||||
return handle(arguments[2], arguments[1] == '%' ? true : false);
|
||||
});
|
||||
}
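  // Illustrative example (assumption, not part of the original file): <%name%>
  // placeholders are replaced with HTML-escaped context values, <#name#> with
  // the raw value, and dotted names descend into nested objects:
  //
  //   renderTemplate('<span><%user.name%></span>', {user: {name: 'a<b'}})
  //   // -> '<span>a&lt;b</span>'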
|
||||
|
||||
/** Flash an error message briefly. */
|
||||
function showError(message) {
|
||||
$(document.createElement('div')).attr({'class': 'popup-error'})
|
||||
.append($(document.createElement('div'))
|
||||
.attr({'class': 'error-message'}).text(message))
|
||||
.appendTo('body')
|
||||
.fadeIn("slow")
|
||||
.delay(2000)
|
||||
.fadeOut("slow");
|
||||
}
|
||||
|
||||
/** Add a link the user uses to open the comments popup. */
|
||||
$.fn.comment = function() {
|
||||
return this.each(function() {
|
||||
var id = $(this).attr('id').substring(1);
|
||||
var count = COMMENT_METADATA[id];
|
||||
var title = count + ' comment' + (count == 1 ? '' : 's');
|
||||
var image = count > 0 ? opts.commentBrightImage : opts.commentImage;
|
||||
var addcls = count == 0 ? ' nocomment' : '';
|
||||
$(this)
|
||||
.append(
|
||||
$(document.createElement('a')).attr({
|
||||
href: '#',
|
||||
'class': 'sphinx-comment-open' + addcls,
|
||||
id: 'ao' + id
|
||||
})
|
||||
.append($(document.createElement('img')).attr({
|
||||
src: image,
|
||||
alt: 'comment',
|
||||
title: title
|
||||
}))
|
||||
.click(function(event) {
|
||||
event.preventDefault();
|
||||
show($(this).attr('id').substring(2));
|
||||
})
|
||||
)
|
||||
.append(
|
||||
$(document.createElement('a')).attr({
|
||||
href: '#',
|
||||
'class': 'sphinx-comment-close hidden',
|
||||
id: 'ah' + id
|
||||
})
|
||||
.append($(document.createElement('img')).attr({
|
||||
src: opts.closeCommentImage,
|
||||
alt: 'close',
|
||||
title: 'close'
|
||||
}))
|
||||
.click(function(event) {
|
||||
event.preventDefault();
|
||||
hide($(this).attr('id').substring(2));
|
||||
})
|
||||
);
|
||||
});
|
||||
};
|
||||
|
||||
var opts = {
|
||||
processVoteURL: '/_process_vote',
|
||||
addCommentURL: '/_add_comment',
|
||||
getCommentsURL: '/_get_comments',
|
||||
acceptCommentURL: '/_accept_comment',
|
||||
deleteCommentURL: '/_delete_comment',
|
||||
commentImage: '/static/_static/comment.png',
|
||||
closeCommentImage: '/static/_static/comment-close.png',
|
||||
loadingImage: '/static/_static/ajax-loader.gif',
|
||||
commentBrightImage: '/static/_static/comment-bright.png',
|
||||
upArrow: '/static/_static/up.png',
|
||||
downArrow: '/static/_static/down.png',
|
||||
upArrowPressed: '/static/_static/up-pressed.png',
|
||||
downArrowPressed: '/static/_static/down-pressed.png',
|
||||
voting: false,
|
||||
moderator: false
|
||||
};
|
||||
|
||||
if (typeof COMMENT_OPTIONS != "undefined") {
|
||||
opts = jQuery.extend(opts, COMMENT_OPTIONS);
|
||||
}
|
||||
|
||||
var popupTemplate = '\
|
||||
<div class="sphinx-comments" id="sc<%id%>">\
|
||||
<p class="sort-options">\
|
||||
Sort by:\
|
||||
<a href="#" class="sort-option byrating">best rated</a>\
|
||||
<a href="#" class="sort-option byascage">newest</a>\
|
||||
<a href="#" class="sort-option byage">oldest</a>\
|
||||
</p>\
|
||||
<div class="comment-header">Comments</div>\
|
||||
<div class="comment-loading" id="cn<%id%>">\
|
||||
loading comments... <img src="<%loadingImage%>" alt="" /></div>\
|
||||
<ul id="cl<%id%>" class="comment-ul"></ul>\
|
||||
<div id="ca<%id%>">\
|
||||
<p class="add-a-comment">Add a comment\
|
||||
(<a href="#" class="comment-markup" id="ab<%id%>">markup</a>):</p>\
|
||||
<div class="comment-markup-box" id="mb<%id%>">\
|
||||
reStructured text markup: <i>*emph*</i>, <b>**strong**</b>, \
|
||||
<code>``code``</code>, \
|
||||
code blocks: <code>::</code> and an indented block after blank line</div>\
|
||||
<form method="post" id="cf<%id%>" class="comment-form" action="">\
|
||||
<textarea name="comment" cols="80"></textarea>\
|
||||
<p class="propose-button">\
|
||||
<a href="#" id="pc<%id%>" class="show-propose-change">\
|
||||
Propose a change ▹\
|
||||
</a>\
|
||||
<a href="#" id="hc<%id%>" class="hide-propose-change">\
|
||||
Propose a change ▿\
|
||||
</a>\
|
||||
</p>\
|
||||
<textarea name="proposal" id="pt<%id%>" cols="80"\
|
||||
spellcheck="false"></textarea>\
|
||||
<input type="submit" value="Add comment" />\
|
||||
<input type="hidden" name="node" value="<%id%>" />\
|
||||
<input type="hidden" name="parent" value="" />\
|
||||
</form>\
|
||||
</div>\
|
||||
</div>';
|
||||
|
||||
var commentTemplate = '\
|
||||
<div id="cd<%id%>" class="sphinx-comment<%css_class%>">\
|
||||
<div class="vote">\
|
||||
<div class="arrow">\
|
||||
<a href="#" id="uv<%id%>" class="vote" title="vote up">\
|
||||
<img src="<%upArrow%>" />\
|
||||
</a>\
|
||||
<a href="#" id="uu<%id%>" class="un vote" title="vote up">\
|
||||
<img src="<%upArrowPressed%>" />\
|
||||
</a>\
|
||||
</div>\
|
||||
<div class="arrow">\
|
||||
<a href="#" id="dv<%id%>" class="vote" title="vote down">\
|
||||
<img src="<%downArrow%>" id="da<%id%>" />\
|
||||
</a>\
|
||||
<a href="#" id="du<%id%>" class="un vote" title="vote down">\
|
||||
<img src="<%downArrowPressed%>" />\
|
||||
</a>\
|
||||
</div>\
|
||||
</div>\
|
||||
<div class="comment-content">\
|
||||
<p class="tagline comment">\
|
||||
<span class="user-id"><%username%></span>\
|
||||
<span class="rating"><%pretty_rating%></span>\
|
||||
<span class="delta"><%time.delta%></span>\
|
||||
</p>\
|
||||
<div class="comment-text comment"><#text#></div>\
|
||||
<p class="comment-opts comment">\
|
||||
<a href="#" class="reply hidden" id="rl<%id%>">reply ▹</a>\
|
||||
<a href="#" class="close-reply" id="cr<%id%>">reply ▿</a>\
|
||||
<a href="#" id="sp<%id%>" class="show-proposal">proposal ▹</a>\
|
||||
<a href="#" id="hp<%id%>" class="hide-proposal">proposal ▿</a>\
|
||||
<a href="#" id="dc<%id%>" class="delete-comment hidden">delete</a>\
|
||||
<span id="cm<%id%>" class="moderation hidden">\
|
||||
<a href="#" id="ac<%id%>" class="accept-comment">accept</a>\
|
||||
</span>\
|
||||
</p>\
|
||||
<pre class="proposal" id="pr<%id%>">\
|
||||
<#proposal_diff#>\
|
||||
</pre>\
|
||||
<ul class="comment-children" id="cl<%id%>"></ul>\
|
||||
</div>\
|
||||
<div class="clearleft"></div>\
|
||||
</div>\
|
||||
</div>';
|
||||
|
||||
var replyTemplate = '\
|
||||
<li>\
|
||||
<div class="reply-div" id="rd<%id%>">\
|
||||
<form id="rf<%id%>">\
|
||||
<textarea name="comment" cols="80"></textarea>\
|
||||
<input type="submit" value="Add reply" />\
|
||||
<input type="button" value="Cancel" />\
|
||||
<input type="hidden" name="parent" value="<%id%>" />\
|
||||
<input type="hidden" name="node" value="" />\
|
||||
</form>\
|
||||
</div>\
|
||||
</li>';
|
||||
|
||||
$(document).ready(function() {
|
||||
init();
|
||||
});
|
||||
})(jQuery);
|
||||
|
||||
$(document).ready(function() {
|
||||
// add comment anchors for all paragraphs that are commentable
|
||||
$('.sphinx-has-comment').comment();
|
||||
|
||||
// highlight search words in search results
|
||||
$("div.context").each(function() {
|
||||
var params = $.getQueryParameters();
|
||||
var terms = (params.q) ? params.q[0].split(/\s+/) : [];
|
||||
var result = $(this);
|
||||
$.each(terms, function() {
|
||||
result.highlightText(this.toLowerCase(), 'highlighted');
|
||||
});
|
||||
});
|
||||
|
||||
// directly open comment window if requested
|
||||
var anchor = document.location.hash;
|
||||
if (anchor.substring(0, 9) == '#comment-') {
|
||||
$('#ao' + anchor.substring(9)).click();
|
||||
document.location.hash = '#s' + anchor.substring(9);
|
||||
}
|
||||
});
|
||||
92
doc/build/html/genindex.html
vendored
Normal file
|
|
@ -0,0 +1,92 @@
|
|||
|
||||
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
|
||||
"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
|
||||
|
||||
|
||||
<html xmlns="http://www.w3.org/1999/xhtml">
|
||||
<head>
|
||||
<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
|
||||
|
||||
<title>Index — T-Pot 16.10 documentation</title>
|
||||
|
||||
<link rel="stylesheet" href="_static/classic.css" type="text/css" />
|
||||
<link rel="stylesheet" href="_static/pygments.css" type="text/css" />
|
||||
|
||||
<script type="text/javascript">
|
||||
var DOCUMENTATION_OPTIONS = {
|
||||
URL_ROOT: './',
|
||||
VERSION: '16.10',
|
||||
COLLAPSE_INDEX: false,
|
||||
FILE_SUFFIX: '.html',
|
||||
HAS_SOURCE: true
|
||||
};
|
||||
</script>
|
||||
<script type="text/javascript" src="_static/jquery.js"></script>
|
||||
<script type="text/javascript" src="_static/underscore.js"></script>
|
||||
<script type="text/javascript" src="_static/doctools.js"></script>
|
||||
<link rel="top" title="T-Pot 16.10 documentation" href="index.html" />
|
||||
</head>
|
||||
<body role="document">
|
||||
<div class="related" role="navigation" aria-label="related navigation">
|
||||
<h3>Navigation</h3>
|
||||
<ul>
|
||||
<li class="right" style="margin-right: 10px">
|
||||
<a href="#" title="General Index"
|
||||
accesskey="I">index</a></li>
|
||||
<li class="nav-item nav-item-0"><a href="index.html">T-Pot 16.10 documentation</a> »</li>
|
||||
</ul>
|
||||
</div>
|
||||
|
||||
<div class="document">
|
||||
<div class="documentwrapper">
|
||||
<div class="bodywrapper">
|
||||
<div class="body" role="main">
|
||||
|
||||
|
||||
<h1 id="index">Index</h1>
|
||||
|
||||
<div class="genindex-jumpbox">
|
||||
|
||||
</div>
|
||||
|
||||
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="sphinxsidebar" role="navigation" aria-label="main navigation">
|
||||
<div class="sphinxsidebarwrapper">
|
||||
|
||||
|
||||
|
||||
<div id="searchbox" style="display: none" role="search">
|
||||
<h3>Quick search</h3>
|
||||
<form class="search" action="search.html" method="get">
|
||||
<input type="text" name="q" />
|
||||
<input type="submit" value="Go" />
|
||||
<input type="hidden" name="check_keywords" value="yes" />
|
||||
<input type="hidden" name="area" value="default" />
|
||||
</form>
|
||||
<p class="searchtip" style="font-size: 90%">
|
||||
Enter search terms or a module, class or function name.
|
||||
</p>
|
||||
</div>
|
||||
<script type="text/javascript">$('#searchbox').show(0);</script>
|
||||
</div>
|
||||
</div>
|
||||
<div class="clearer"></div>
|
||||
</div>
|
||||
<div class="related" role="navigation" aria-label="related navigation">
|
||||
<h3>Navigation</h3>
|
||||
<ul>
|
||||
<li class="right" style="margin-right: 10px">
|
||||
<a href="#" title="General Index"
|
||||
>index</a></li>
|
||||
<li class="nav-item nav-item-0"><a href="index.html">T-Pot 16.10 documentation</a> »</li>
|
||||
</ul>
|
||||
</div>
|
||||
<div class="footer" role="contentinfo">
|
||||
© Copyright 2016, t3chn0m4g3.
|
||||
Created using <a href="http://sphinx-doc.org/">Sphinx</a> 1.3.6.
|
||||
</div>
|
||||
</body>
|
||||
</html>
|
||||
111
doc/build/html/index.html
vendored
Normal file
|
|
@ -0,0 +1,111 @@
|
|||
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
|
||||
"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
|
||||
|
||||
|
||||
<html xmlns="http://www.w3.org/1999/xhtml">
|
||||
<head>
|
||||
<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
|
||||
|
||||
<title>Welcome to T-Pot’s documentation! — T-Pot 16.10 documentation</title>
|
||||
|
||||
<link rel="stylesheet" href="_static/classic.css" type="text/css" />
|
||||
<link rel="stylesheet" href="_static/pygments.css" type="text/css" />
|
||||
|
||||
<script type="text/javascript">
|
||||
var DOCUMENTATION_OPTIONS = {
|
||||
URL_ROOT: './',
|
||||
VERSION: '16.10',
|
||||
COLLAPSE_INDEX: false,
|
||||
FILE_SUFFIX: '.html',
|
||||
HAS_SOURCE: true
|
||||
};
|
||||
</script>
|
||||
<script type="text/javascript" src="_static/jquery.js"></script>
|
||||
<script type="text/javascript" src="_static/underscore.js"></script>
|
||||
<script type="text/javascript" src="_static/doctools.js"></script>
|
||||
<link rel="top" title="T-Pot 16.10 documentation" href="#" />
|
||||
</head>
|
||||
<body role="document">
|
||||
<div class="related" role="navigation" aria-label="related navigation">
|
||||
<h3>Navigation</h3>
|
||||
<ul>
|
||||
<li class="right" style="margin-right: 10px">
|
||||
<a href="genindex.html" title="General Index"
|
||||
accesskey="I">index</a></li>
|
||||
<li class="nav-item nav-item-0"><a href="#">T-Pot 16.10 documentation</a> »</li>
|
||||
</ul>
|
||||
</div>
|
||||
|
||||
<div class="document">
|
||||
<div class="documentwrapper">
|
||||
<div class="bodywrapper">
|
||||
<div class="body" role="main">
|
||||
|
||||
<div class="section" id="welcome-to-t-pot-s-documentation">
|
||||
<h1>Welcome to T-Pot’s documentation!<a class="headerlink" href="#welcome-to-t-pot-s-documentation" title="Permalink to this headline">¶</a></h1>
|
||||
<p>Contents:</p>
|
||||
<div class="toctree-wrapper compound">
|
||||
<ul class="simple">
|
||||
</ul>
|
||||
</div>
|
||||
</div>
|
||||
<div class="section" id="indices-and-tables">
|
||||
<h1>Indices and tables<a class="headerlink" href="#indices-and-tables" title="Permalink to this headline">¶</a></h1>
|
||||
<ul class="simple">
|
||||
<li><a class="reference internal" href="genindex.html"><span>Index</span></a></li>
|
||||
<li><a class="reference internal" href="py-modindex.html"><span>Module Index</span></a></li>
|
||||
<li><a class="reference internal" href="search.html"><span>Search Page</span></a></li>
|
||||
</ul>
|
||||
</div>
|
||||
|
||||
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="sphinxsidebar" role="navigation" aria-label="main navigation">
|
||||
<div class="sphinxsidebarwrapper">
|
||||
<h3><a href="#">Table Of Contents</a></h3>
|
||||
<ul>
|
||||
<li><a class="reference internal" href="#">Welcome to T-Pot’s documentation!</a></li>
|
||||
<li><a class="reference internal" href="#indices-and-tables">Indices and tables</a></li>
|
||||
</ul>
|
||||
|
||||
<div role="note" aria-label="source link">
|
||||
<h3>This Page</h3>
|
||||
<ul class="this-page-menu">
|
||||
<li><a href="_sources/index.txt"
|
||||
rel="nofollow">Show Source</a></li>
|
||||
</ul>
|
||||
</div>
|
||||
<div id="searchbox" style="display: none" role="search">
|
||||
<h3>Quick search</h3>
|
||||
<form class="search" action="search.html" method="get">
|
||||
<input type="text" name="q" />
|
||||
<input type="submit" value="Go" />
|
||||
<input type="hidden" name="check_keywords" value="yes" />
|
||||
<input type="hidden" name="area" value="default" />
|
||||
</form>
|
||||
<p class="searchtip" style="font-size: 90%">
|
||||
Enter search terms or a module, class or function name.
|
||||
</p>
|
||||
</div>
|
||||
<script type="text/javascript">$('#searchbox').show(0);</script>
|
||||
</div>
|
||||
</div>
|
||||
<div class="clearer"></div>
|
||||
</div>
|
||||
<div class="related" role="navigation" aria-label="related navigation">
|
||||
<h3>Navigation</h3>
|
||||
<ul>
|
||||
<li class="right" style="margin-right: 10px">
|
||||
<a href="genindex.html" title="General Index"
|
||||
>index</a></li>
|
||||
<li class="nav-item nav-item-0"><a href="#">T-Pot 16.10 documentation</a> »</li>
|
||||
</ul>
|
||||
</div>
|
||||
<div class="footer" role="contentinfo">
|
||||
© Copyright 2016, t3chn0m4g3.
|
||||
Created using <a href="http://sphinx-doc.org/">Sphinx</a> 1.3.6.
|
||||
</div>
|
||||
</body>
|
||||
</html>
|
||||
BIN
doc/build/html/objects.inv
vendored
Normal file
99
doc/build/html/search.html
vendored
Normal file
|
|
@ -0,0 +1,99 @@
|
|||
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
|
||||
"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
|
||||
|
||||
|
||||
<html xmlns="http://www.w3.org/1999/xhtml">
|
||||
<head>
|
||||
<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
|
||||
|
||||
<title>Search — T-Pot 16.10 documentation</title>
|
||||
|
||||
<link rel="stylesheet" href="_static/classic.css" type="text/css" />
|
||||
<link rel="stylesheet" href="_static/pygments.css" type="text/css" />
|
||||
|
||||
<script type="text/javascript">
|
||||
var DOCUMENTATION_OPTIONS = {
|
||||
URL_ROOT: './',
|
||||
VERSION: '16.10',
|
||||
COLLAPSE_INDEX: false,
|
||||
FILE_SUFFIX: '.html',
|
||||
HAS_SOURCE: true
|
||||
};
|
||||
</script>
|
||||
<script type="text/javascript" src="_static/jquery.js"></script>
|
||||
<script type="text/javascript" src="_static/underscore.js"></script>
|
||||
<script type="text/javascript" src="_static/doctools.js"></script>
|
||||
<script type="text/javascript" src="_static/searchtools.js"></script>
|
||||
<link rel="top" title="T-Pot 16.10 documentation" href="index.html" />
|
||||
<script type="text/javascript">
|
||||
jQuery(function() { Search.loadIndex("searchindex.js"); });
|
||||
</script>
|
||||
|
||||
<script type="text/javascript" id="searchindexloader"></script>
|
||||
|
||||
|
||||
</head>
|
||||
<body role="document">
|
||||
<div class="related" role="navigation" aria-label="related navigation">
|
||||
<h3>Navigation</h3>
|
||||
<ul>
|
||||
<li class="right" style="margin-right: 10px">
|
||||
<a href="genindex.html" title="General Index"
|
||||
accesskey="I">index</a></li>
|
||||
<li class="nav-item nav-item-0"><a href="index.html">T-Pot 16.10 documentation</a> »</li>
|
||||
</ul>
|
||||
</div>
|
||||
|
||||
<div class="document">
|
||||
<div class="documentwrapper">
|
||||
<div class="bodywrapper">
|
||||
<div class="body" role="main">
|
||||
|
||||
<h1 id="search-documentation">Search</h1>
|
||||
<div id="fallback" class="admonition warning">
|
||||
<script type="text/javascript">$('#fallback').hide();</script>
|
||||
<p>
|
||||
Please activate JavaScript to enable the search
|
||||
functionality.
|
||||
</p>
|
||||
</div>
|
||||
<p>
|
||||
From here you can search these documents. Enter your search
|
||||
words into the box below and click "search". Note that the search
|
||||
function will automatically search for all of the words. Pages
|
||||
containing fewer words won't appear in the result list.
|
||||
</p>
|
||||
<form action="" method="get">
|
||||
<input type="text" name="q" value="" />
|
||||
<input type="submit" value="search" />
|
||||
<span id="search-progress" style="padding-left: 10px"></span>
|
||||
</form>
|
||||
|
||||
<div id="search-results">
|
||||
|
||||
</div>
|
||||
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="sphinxsidebar" role="navigation" aria-label="main navigation">
|
||||
<div class="sphinxsidebarwrapper">
|
||||
</div>
|
||||
</div>
|
||||
<div class="clearer"></div>
|
||||
</div>
|
||||
<div class="related" role="navigation" aria-label="related navigation">
|
||||
<h3>Navigation</h3>
|
||||
<ul>
|
||||
<li class="right" style="margin-right: 10px">
|
||||
<a href="genindex.html" title="General Index"
|
||||
>index</a></li>
|
||||
<li class="nav-item nav-item-0"><a href="index.html">T-Pot 16.10 documentation</a> »</li>
|
||||
</ul>
|
||||
</div>
|
||||
<div class="footer" role="contentinfo">
|
||||
© Copyright 2016, t3chn0m4g3.
|
||||
Created using <a href="http://sphinx-doc.org/">Sphinx</a> 1.3.6.
|
||||
</div>
|
||||
</body>
|
||||
</html>
|
||||
1
doc/build/html/searchindex.js
vendored
Normal file
|
|
@ -0,0 +1 @@
|
|||
Search.setIndex({envversion:46,filenames:["index"],objects:{},objnames:{},objtypes:{},terms:{content:0,index:0,modul:0,page:0,search:0},titles:["Welcome to T-Pot’s documentation!"],titleterms:{document:0,indic:0,pot:0,tabl:0,welcom:0}})
|
||||
|
Before Width: | Height: | Size: 117 KiB |
2737
doc/dashboard.json
Normal file
BIN
doc/dashboard.png
Normal file
|
After Width: | Height: | Size: 319 KiB |
BIN
doc/dockerui.png
Normal file
|
After Width: | Height: | Size: 103 KiB |
|
Before Width: | Height: | Size: 174 KiB |
BIN
doc/headplugin.png
Normal file
|
After Width: | Height: | Size: 84 KiB |
BIN
doc/kibana_a.png
|
Before Width: | Height: | Size: 608 KiB |
BIN
doc/kibana_b.png
|
Before Width: | Height: | Size: 98 KiB |
BIN
doc/kibana_c.png
|
Before Width: | Height: | Size: 310 KiB |
263
doc/make.bat
Normal file
|
|
@ -0,0 +1,263 @@
|
|||
@ECHO OFF
|
||||
|
||||
REM Command file for Sphinx documentation
|
||||
|
||||
if "%SPHINXBUILD%" == "" (
|
||||
set SPHINXBUILD=sphinx-build
|
||||
)
|
||||
set BUILDDIR=build
|
||||
set ALLSPHINXOPTS=-d %BUILDDIR%/doctrees %SPHINXOPTS% source
|
||||
set I18NSPHINXOPTS=%SPHINXOPTS% source
|
||||
if NOT "%PAPER%" == "" (
|
||||
set ALLSPHINXOPTS=-D latex_paper_size=%PAPER% %ALLSPHINXOPTS%
|
||||
set I18NSPHINXOPTS=-D latex_paper_size=%PAPER% %I18NSPHINXOPTS%
|
||||
)
|
||||
|
||||
if "%1" == "" goto help
|
||||
|
||||
if "%1" == "help" (
|
||||
:help
|
||||
echo.Please use `make ^<target^>` where ^<target^> is one of
|
||||
echo. html to make standalone HTML files
|
||||
echo. dirhtml to make HTML files named index.html in directories
|
||||
echo. singlehtml to make a single large HTML file
|
||||
echo. pickle to make pickle files
|
||||
echo. json to make JSON files
|
||||
echo. htmlhelp to make HTML files and a HTML help project
|
||||
echo. qthelp to make HTML files and a qthelp project
|
||||
echo. devhelp to make HTML files and a Devhelp project
|
||||
echo. epub to make an epub
|
||||
echo. latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter
|
||||
echo. text to make text files
|
||||
echo. man to make manual pages
|
||||
echo. texinfo to make Texinfo files
|
||||
echo. gettext to make PO message catalogs
|
||||
echo. changes to make an overview over all changed/added/deprecated items
|
||||
echo. xml to make Docutils-native XML files
|
||||
echo. pseudoxml to make pseudoxml-XML files for display purposes
|
||||
echo. linkcheck to check all external links for integrity
|
||||
echo. doctest to run all doctests embedded in the documentation if enabled
|
||||
echo. coverage to run coverage check of the documentation if enabled
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "clean" (
|
||||
for /d %%i in (%BUILDDIR%\*) do rmdir /q /s %%i
|
||||
del /q /s %BUILDDIR%\*
|
||||
goto end
|
||||
)
|
||||
|
||||
|
||||
REM Check if sphinx-build is available and fallback to Python version if any
|
||||
%SPHINXBUILD% 1>NUL 2>NUL
|
||||
if errorlevel 9009 goto sphinx_python
|
||||
goto sphinx_ok
|
||||
|
||||
:sphinx_python
|
||||
|
||||
set SPHINXBUILD=python -m sphinx.__init__
|
||||
%SPHINXBUILD% 2> nul
|
||||
if errorlevel 9009 (
|
||||
echo.
|
||||
echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
|
||||
echo.installed, then set the SPHINXBUILD environment variable to point
|
||||
echo.to the full path of the 'sphinx-build' executable. Alternatively you
|
||||
echo.may add the Sphinx directory to PATH.
|
||||
echo.
|
||||
echo.If you don't have Sphinx installed, grab it from
|
||||
echo.http://sphinx-doc.org/
|
||||
exit /b 1
|
||||
)
|
||||
|
||||
:sphinx_ok
|
||||
|
||||
|
||||
if "%1" == "html" (
|
||||
%SPHINXBUILD% -b html %ALLSPHINXOPTS% %BUILDDIR%/html
|
||||
if errorlevel 1 exit /b 1
|
||||
echo.
|
||||
echo.Build finished. The HTML pages are in %BUILDDIR%/html.
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "dirhtml" (
|
||||
%SPHINXBUILD% -b dirhtml %ALLSPHINXOPTS% %BUILDDIR%/dirhtml
|
||||
if errorlevel 1 exit /b 1
|
||||
echo.
|
||||
echo.Build finished. The HTML pages are in %BUILDDIR%/dirhtml.
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "singlehtml" (
|
||||
%SPHINXBUILD% -b singlehtml %ALLSPHINXOPTS% %BUILDDIR%/singlehtml
|
||||
if errorlevel 1 exit /b 1
|
||||
echo.
|
||||
echo.Build finished. The HTML pages are in %BUILDDIR%/singlehtml.
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "pickle" (
|
||||
%SPHINXBUILD% -b pickle %ALLSPHINXOPTS% %BUILDDIR%/pickle
|
||||
if errorlevel 1 exit /b 1
|
||||
echo.
|
||||
echo.Build finished; now you can process the pickle files.
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "json" (
|
||||
%SPHINXBUILD% -b json %ALLSPHINXOPTS% %BUILDDIR%/json
|
||||
if errorlevel 1 exit /b 1
|
||||
echo.
|
||||
echo.Build finished; now you can process the JSON files.
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "htmlhelp" (
|
||||
%SPHINXBUILD% -b htmlhelp %ALLSPHINXOPTS% %BUILDDIR%/htmlhelp
|
||||
if errorlevel 1 exit /b 1
|
||||
echo.
|
||||
echo.Build finished; now you can run HTML Help Workshop with the ^
|
||||
.hhp project file in %BUILDDIR%/htmlhelp.
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "qthelp" (
|
||||
%SPHINXBUILD% -b qthelp %ALLSPHINXOPTS% %BUILDDIR%/qthelp
|
||||
if errorlevel 1 exit /b 1
|
||||
echo.
|
||||
echo.Build finished; now you can run "qcollectiongenerator" with the ^
|
||||
.qhcp project file in %BUILDDIR%/qthelp, like this:
|
||||
echo.^> qcollectiongenerator %BUILDDIR%\qthelp\T-Pot.qhcp
|
||||
echo.To view the help file:
|
||||
	echo.^> assistant -collectionFile %BUILDDIR%\qthelp\T-Pot.qhc
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "devhelp" (
|
||||
%SPHINXBUILD% -b devhelp %ALLSPHINXOPTS% %BUILDDIR%/devhelp
|
||||
if errorlevel 1 exit /b 1
|
||||
echo.
|
||||
echo.Build finished.
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "epub" (
|
||||
%SPHINXBUILD% -b epub %ALLSPHINXOPTS% %BUILDDIR%/epub
|
||||
if errorlevel 1 exit /b 1
|
||||
echo.
|
||||
echo.Build finished. The epub file is in %BUILDDIR%/epub.
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "latex" (
|
||||
%SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex
|
||||
if errorlevel 1 exit /b 1
|
||||
echo.
|
||||
echo.Build finished; the LaTeX files are in %BUILDDIR%/latex.
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "latexpdf" (
|
||||
%SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex
|
||||
cd %BUILDDIR%/latex
|
||||
make all-pdf
|
||||
cd %~dp0
|
||||
echo.
|
||||
echo.Build finished; the PDF files are in %BUILDDIR%/latex.
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "latexpdfja" (
|
||||
%SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex
|
||||
cd %BUILDDIR%/latex
|
||||
make all-pdf-ja
|
||||
cd %~dp0
|
||||
echo.
|
||||
echo.Build finished; the PDF files are in %BUILDDIR%/latex.
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "text" (
|
||||
%SPHINXBUILD% -b text %ALLSPHINXOPTS% %BUILDDIR%/text
|
||||
if errorlevel 1 exit /b 1
|
||||
echo.
|
||||
echo.Build finished. The text files are in %BUILDDIR%/text.
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "man" (
|
||||
%SPHINXBUILD% -b man %ALLSPHINXOPTS% %BUILDDIR%/man
|
||||
if errorlevel 1 exit /b 1
|
||||
echo.
|
||||
echo.Build finished. The manual pages are in %BUILDDIR%/man.
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "texinfo" (
|
||||
%SPHINXBUILD% -b texinfo %ALLSPHINXOPTS% %BUILDDIR%/texinfo
|
||||
if errorlevel 1 exit /b 1
|
||||
echo.
|
||||
echo.Build finished. The Texinfo files are in %BUILDDIR%/texinfo.
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "gettext" (
|
||||
%SPHINXBUILD% -b gettext %I18NSPHINXOPTS% %BUILDDIR%/locale
|
||||
if errorlevel 1 exit /b 1
|
||||
echo.
|
||||
echo.Build finished. The message catalogs are in %BUILDDIR%/locale.
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "changes" (
|
||||
%SPHINXBUILD% -b changes %ALLSPHINXOPTS% %BUILDDIR%/changes
|
||||
if errorlevel 1 exit /b 1
|
||||
echo.
|
||||
echo.The overview file is in %BUILDDIR%/changes.
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "linkcheck" (
|
||||
%SPHINXBUILD% -b linkcheck %ALLSPHINXOPTS% %BUILDDIR%/linkcheck
|
||||
if errorlevel 1 exit /b 1
|
||||
echo.
|
||||
echo.Link check complete; look for any errors in the above output ^
|
||||
or in %BUILDDIR%/linkcheck/output.txt.
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "doctest" (
|
||||
%SPHINXBUILD% -b doctest %ALLSPHINXOPTS% %BUILDDIR%/doctest
|
||||
if errorlevel 1 exit /b 1
|
||||
echo.
|
||||
echo.Testing of doctests in the sources finished, look at the ^
|
||||
results in %BUILDDIR%/doctest/output.txt.
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "coverage" (
|
||||
%SPHINXBUILD% -b coverage %ALLSPHINXOPTS% %BUILDDIR%/coverage
|
||||
if errorlevel 1 exit /b 1
|
||||
echo.
|
||||
echo.Testing of coverage in the sources finished, look at the ^
|
||||
results in %BUILDDIR%/coverage/python.txt.
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "xml" (
|
||||
%SPHINXBUILD% -b xml %ALLSPHINXOPTS% %BUILDDIR%/xml
|
||||
if errorlevel 1 exit /b 1
|
||||
echo.
|
||||
echo.Build finished. The XML files are in %BUILDDIR%/xml.
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "pseudoxml" (
|
||||
%SPHINXBUILD% -b pseudoxml %ALLSPHINXOPTS% %BUILDDIR%/pseudoxml
|
||||
if errorlevel 1 exit /b 1
|
||||
echo.
|
||||
echo.Build finished. The pseudo-XML files are in %BUILDDIR%/pseudoxml.
|
||||
goto end
|
||||
)
|
||||
|
||||
:end
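REM Illustrative usage (not part of the original file): running "make.bat html"
REM from the doc directory invokes %SPHINXBUILD% -b html and writes the rendered
REM pages to %BUILDDIR%\html, i.e. doc\build\html.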
|
||||
BIN
doc/netdata.png
Normal file
|
After Width: | Height: | Size: 211 KiB |
285
doc/source/conf.py
Normal file
|
|
@ -0,0 +1,285 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
#
|
||||
# T-Pot documentation build configuration file, created by
|
||||
# sphinx-quickstart on Mon Aug 8 13:24:39 2016.
|
||||
#
|
||||
# This file is execfile()d with the current directory set to its
|
||||
# containing dir.
|
||||
#
|
||||
# Note that not all possible configuration values are present in this
|
||||
# autogenerated file.
|
||||
#
|
||||
# All configuration values have a default; values that are commented out
|
||||
# serve to show the default.
|
||||
|
||||
import sys
|
||||
import os
|
||||
|
||||
# If extensions (or modules to document with autodoc) are in another directory,
|
||||
# add these directories to sys.path here. If the directory is relative to the
|
||||
# documentation root, use os.path.abspath to make it absolute, like shown here.
|
||||
#sys.path.insert(0, os.path.abspath('.'))
|
||||
|
||||
# -- General configuration ------------------------------------------------
|
||||
|
||||
# If your documentation needs a minimal Sphinx version, state it here.
|
||||
#needs_sphinx = '1.0'
|
||||
|
||||
# Add any Sphinx extension module names here, as strings. They can be
|
||||
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
|
||||
# ones.
|
||||
extensions = [
|
||||
'sphinx.ext.autodoc',
|
||||
]
|
||||
|
||||
# Add any paths that contain templates here, relative to this directory.
|
||||
templates_path = ['_templates']
|
||||
|
||||
# The suffix(es) of source filenames.
|
||||
# You can specify multiple suffix as a list of string:
|
||||
# source_suffix = ['.rst', '.md']
|
||||
source_suffix = '.rst'
|
||||
|
||||
# The encoding of source files.
|
||||
#source_encoding = 'utf-8-sig'
|
||||
|
||||
# The master toctree document.
|
||||
master_doc = 'index'
|
||||
|
||||
# General information about the project.
|
||||
project = u'T-Pot'
|
||||
copyright = u'2016, t3chn0m4g3'
|
||||
author = u't3chn0m4g3'
|
||||
|
||||
# The version info for the project you're documenting, acts as replacement for
|
||||
# |version| and |release|, also used in various other places throughout the
|
||||
# built documents.
|
||||
#
|
||||
# The short X.Y version.
|
||||
version = u'0.0.1'
|
||||
# The full version, including alpha/beta/rc tags.
|
||||
release = u'16.10'
|
||||
|
||||
# The language for content autogenerated by Sphinx. Refer to documentation
|
||||
# for a list of supported languages.
|
||||
#
|
||||
# This is also used if you do content translation via gettext catalogs.
|
||||
# Usually you set "language" from the command line for these cases.
|
||||
language = None
|
||||
|
||||
# There are two options for replacing |today|: either, you set today to some
|
||||
# non-false value, then it is used:
|
||||
#today = ''
|
||||
# Else, today_fmt is used as the format for a strftime call.
|
||||
#today_fmt = '%B %d, %Y'
|
||||
|
||||
# List of patterns, relative to source directory, that match files and
|
||||
# directories to ignore when looking for source files.
|
||||
exclude_patterns = []
|
||||
|
||||
# The reST default role (used for this markup: `text`) to use for all
|
||||
# documents.
|
||||
#default_role = None
|
||||
|
||||
# If true, '()' will be appended to :func: etc. cross-reference text.
|
||||
#add_function_parentheses = True
|
||||
|
||||
# If true, the current module name will be prepended to all description
|
||||
# unit titles (such as .. function::).
|
||||
#add_module_names = True
|
||||
|
||||
# If true, sectionauthor and moduleauthor directives will be shown in the
|
||||
# output. They are ignored by default.
|
||||
#show_authors = False
|
||||
|
||||
# The name of the Pygments (syntax highlighting) style to use.
|
||||
pygments_style = 'sphinx'
|
||||
|
||||
# A list of ignored prefixes for module index sorting.
|
||||
#modindex_common_prefix = []
|
||||
|
||||
# If true, keep warnings as "system message" paragraphs in the built documents.
|
||||
#keep_warnings = False
|
||||
|
||||
# If true, `todo` and `todoList` produce output, else they produce nothing.
|
||||
todo_include_todos = False
|
||||
|
||||
|
||||
# -- Options for HTML output ----------------------------------------------
|
||||
|
||||
# The theme to use for HTML and HTML Help pages. See the documentation for
|
||||
# a list of builtin themes.
|
||||
html_theme = 'default'
|
||||
|
||||
# Theme options are theme-specific and customize the look and feel of a theme
|
||||
# further. For a list of options available for each theme, see the
|
||||
# documentation.
|
||||
#html_theme_options = {}
|
||||
|
||||
# Add any paths that contain custom themes here, relative to this directory.
|
||||
#html_theme_path = []
|
||||
|
||||
# The name for this set of Sphinx documents. If None, it defaults to
|
||||
# "<project> v<release> documentation".
|
||||
#html_title = None
|
||||
|
||||
# A shorter title for the navigation bar. Default is the same as html_title.
|
||||
#html_short_title = None
|
||||
|
||||
# The name of an image file (relative to this directory) to place at the top
|
||||
# of the sidebar.
|
||||
#html_logo = None
|
||||
|
||||
# The name of an image file (relative to this directory) to use as a favicon of
|
||||
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
|
||||
# pixels large.
|
||||
#html_favicon = None
|
||||
|
||||
# Add any paths that contain custom static files (such as style sheets) here,
|
||||
# relative to this directory. They are copied after the builtin static files,
|
||||
# so a file named "default.css" will overwrite the builtin "default.css".
|
||||
html_static_path = ['_static']
|
||||
|
||||
# Add any extra paths that contain custom files (such as robots.txt or
|
||||
# .htaccess) here, relative to this directory. These files are copied
|
||||
# directly to the root of the documentation.
|
||||
#html_extra_path = []
|
||||
|
||||
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
|
||||
# using the given strftime format.
|
||||
#html_last_updated_fmt = '%b %d, %Y'
|
||||
|
||||
# If true, SmartyPants will be used to convert quotes and dashes to
|
||||
# typographically correct entities.
|
||||
#html_use_smartypants = True
|
||||
|
||||
# Custom sidebar templates, maps document names to template names.
|
||||
#html_sidebars = {}
|
||||
|
||||
# Additional templates that should be rendered to pages, maps page names to
|
||||
# template names.
|
||||
#html_additional_pages = {}
|
||||
|
||||
# If false, no module index is generated.
|
||||
#html_domain_indices = True
|
||||
|
||||
# If false, no index is generated.
|
||||
#html_use_index = True
|
||||
|
||||
# If true, the index is split into individual pages for each letter.
|
||||
#html_split_index = False
|
||||
|
||||
# If true, links to the reST sources are added to the pages.
|
||||
#html_show_sourcelink = True
|
||||
|
||||
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
|
||||
#html_show_sphinx = True
|
||||
|
||||
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
|
||||
#html_show_copyright = True
|
||||
|
||||
# If true, an OpenSearch description file will be output, and all pages will
|
||||
# contain a <link> tag referring to it. The value of this option must be the
|
||||
# base URL from which the finished HTML is served.
|
||||
#html_use_opensearch = ''
|
||||
|
||||
# This is the file name suffix for HTML files (e.g. ".xhtml").
|
||||
#html_file_suffix = None
|
||||
|
||||
# Language to be used for generating the HTML full-text search index.
|
||||
# Sphinx supports the following languages:
|
||||
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
|
||||
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
|
||||
#html_search_language = 'en'
|
||||
|
||||
# A dictionary with options for the search language support, empty by default.
|
||||
# Now only 'ja' uses this config value
|
||||
#html_search_options = {'type': 'default'}
|
||||
|
||||
# The name of a javascript file (relative to the configuration directory) that
|
||||
# implements a search results scorer. If empty, the default will be used.
|
||||
#html_search_scorer = 'scorer.js'
|
||||
|
||||
# Output file base name for HTML help builder.
|
||||
htmlhelp_basename = 'T-Potdoc'
|
||||
|
||||
# -- Options for LaTeX output ---------------------------------------------
|
||||
|
||||
latex_elements = {
|
||||
# The paper size ('letterpaper' or 'a4paper').
|
||||
#'papersize': 'letterpaper',
|
||||
|
||||
# The font size ('10pt', '11pt' or '12pt').
|
||||
#'pointsize': '10pt',
|
||||
|
||||
# Additional stuff for the LaTeX preamble.
|
||||
#'preamble': '',
|
||||
|
||||
# Latex figure (float) alignment
|
||||
#'figure_align': 'htbp',
|
||||
}
|
||||
|
||||
# Grouping the document tree into LaTeX files. List of tuples
|
||||
# (source start file, target name, title,
|
||||
# author, documentclass [howto, manual, or own class]).
|
||||
latex_documents = [
|
||||
(master_doc, 'T-Pot.tex', u'T-Pot Documentation',
|
||||
u't3chn0m4g3', 'manual'),
|
||||
]
|
||||
|
||||
# The name of an image file (relative to this directory) to place at the top of
|
||||
# the title page.
|
||||
#latex_logo = None
|
||||
|
||||
# For "manual" documents, if this is true, then toplevel headings are parts,
|
||||
# not chapters.
|
||||
#latex_use_parts = False
|
||||
|
||||
# If true, show page references after internal links.
|
||||
#latex_show_pagerefs = False
|
||||
|
||||
# If true, show URL addresses after external links.
|
||||
#latex_show_urls = False
|
||||
|
||||
# Documents to append as an appendix to all manuals.
|
||||
#latex_appendices = []
|
||||
|
||||
# If false, no module index is generated.
|
||||
#latex_domain_indices = True
|
||||
|
||||
|
||||
# -- Options for manual page output ---------------------------------------
|
||||
|
||||
# One entry per manual page. List of tuples
|
||||
# (source start file, name, description, authors, manual section).
|
||||
man_pages = [
|
||||
(master_doc, 't-pot', u'T-Pot Documentation',
|
||||
[author], 1)
|
||||
]
|
||||
|
||||
# If true, show URL addresses after external links.
|
||||
#man_show_urls = False
|
||||
|
||||
|
||||
# -- Options for Texinfo output -------------------------------------------
|
||||
|
||||
# Grouping the document tree into Texinfo files. List of tuples
|
||||
# (source start file, target name, title, author,
|
||||
# dir menu entry, description, category)
|
||||
texinfo_documents = [
|
||||
(master_doc, 'T-Pot', u'T-Pot Documentation',
|
||||
author, 'T-Pot', 'One line description of project.',
|
||||
'Miscellaneous'),
|
||||
]
|
||||
|
||||
# Documents to append as an appendix to all manuals.
|
||||
#texinfo_appendices = []
|
||||
|
||||
# If false, no module index is generated.
|
||||
#texinfo_domain_indices = True
|
||||
|
||||
# How to display URL addresses: 'footnote', 'no', or 'inline'.
|
||||
#texinfo_show_urls = 'footnote'
|
||||
|
||||
# If true, do not generate a @detailmenu in the "Top" node's menu.
|
||||
#texinfo_no_detailmenu = False
|
||||
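A minimal sketch (not part of the diff) of how the documentation could be built with the conf.py above; the output directory is an assumption, and the Sphinx release only needs to be compatible with this 2016-era configuration (master_doc = 'index', sources in doc/source):

    python3 -m pip install sphinx
    sphinx-build -b html doc/source doc/build/html
    # entry point of the rendered docs would then be doc/build/html/index.html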
22  doc/source/index.rst  (new file)
@@ -0,0 +1,22 @@
|
|||
.. T-Pot documentation master file, created by
|
||||
sphinx-quickstart on Mon Aug 8 13:24:39 2016.
|
||||
You can adapt this file completely to your liking, but it should at least
|
||||
contain the root `toctree` directive.
|
||||
|
||||
Welcome to T-Pot's documentation!
|
||||
=================================
|
||||
|
||||
Contents:
|
||||
|
||||
.. toctree::
|
||||
:maxdepth: 2
|
||||
|
||||
|
||||
|
||||
Indices and tables
|
||||
==================
|
||||
|
||||
* :ref:`genindex`
|
||||
* :ref:`modindex`
|
||||
* :ref:`search`
|
||||
|
||||
|
BIN  (removed image, 162 KiB)
BIN  doc/t-pot_qr.png  (removed, 92 KiB)
BIN  (removed image, 252 KiB)
BIN  (removed image, 606 KiB)
BIN  (removed image, 148 KiB)
BIN  (removed image, 486 KiB)
BIN  doc/webssh.png  (new file; image, 61 KiB)
@@ -1,856 +0,0 @@
|
|||
# T-Pot: STANDARD
|
||||
networks:
|
||||
adbhoney_local:
|
||||
ciscoasa_local:
|
||||
conpot_local_IEC104:
|
||||
conpot_local_guardian_ast:
|
||||
conpot_local_ipmi:
|
||||
conpot_local_kamstrup_382:
|
||||
cowrie_local:
|
||||
dicompot_local:
|
||||
dionaea_local:
|
||||
elasticpot_local:
|
||||
h0neytr4p_local:
|
||||
heralding_local:
|
||||
honeyaml_local:
|
||||
ipphoney_local:
|
||||
mailoney_local:
|
||||
medpot_local:
|
||||
miniprint_local:
|
||||
redishoneypot_local:
|
||||
sentrypeer_local:
|
||||
tanner_local:
|
||||
wordpot_local:
|
||||
nginx_local:
|
||||
ewsposter_local:
|
||||
|
||||
services:
|
||||
|
||||
#########################################
|
||||
#### DEV
|
||||
#########################################
|
||||
#### T-Pot Init - Never delete this!
|
||||
#########################################
|
||||
|
||||
# T-Pot Init Service
|
||||
tpotinit:
|
||||
container_name: tpotinit
|
||||
env_file:
|
||||
- .env
|
||||
restart: always
|
||||
stop_grace_period: 60s
|
||||
tmpfs:
|
||||
- /tmp/etc:uid=2000,gid=2000
|
||||
- /tmp/:uid=2000,gid=2000
|
||||
network_mode: "host"
|
||||
cap_add:
|
||||
- NET_ADMIN
|
||||
image: ${TPOT_REPO}/tpotinit:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
volumes:
|
||||
- ${TPOT_DOCKER_COMPOSE}:/tmp/tpot/docker-compose.yml:ro
|
||||
- ${TPOT_DATA_PATH}/blackhole:/etc/blackhole
|
||||
- ${TPOT_DATA_PATH}:/data
|
||||
- /var/run/docker.sock:/var/run/docker.sock:ro
|
||||
|
||||
|
||||
##################
|
||||
#### Honeypots
|
||||
##################
|
||||
|
||||
# Adbhoney service
|
||||
adbhoney:
|
||||
container_name: adbhoney
|
||||
restart: always
|
||||
depends_on:
|
||||
tpotinit:
|
||||
condition: service_healthy
|
||||
networks:
|
||||
- adbhoney_local
|
||||
ports:
|
||||
- "5555:5555"
|
||||
image: ${TPOT_REPO}/adbhoney:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
read_only: true
|
||||
volumes:
|
||||
- ${TPOT_DATA_PATH}/adbhoney/log:/opt/adbhoney/log
|
||||
- ${TPOT_DATA_PATH}/adbhoney/downloads:/opt/adbhoney/dl
|
||||
|
||||
# Ciscoasa service
|
||||
ciscoasa:
|
||||
container_name: ciscoasa
|
||||
restart: always
|
||||
depends_on:
|
||||
tpotinit:
|
||||
condition: service_healthy
|
||||
tmpfs:
|
||||
- /tmp/ciscoasa:uid=2000,gid=2000
|
||||
networks:
|
||||
- ciscoasa_local
|
||||
ports:
|
||||
- "5000:5000/udp"
|
||||
- "8443:8443"
|
||||
image: ${TPOT_REPO}/ciscoasa:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
read_only: true
|
||||
volumes:
|
||||
- ${TPOT_DATA_PATH}/ciscoasa/log:/var/log/ciscoasa
|
||||
|
||||
# Conpot IEC104 service
|
||||
conpot_IEC104:
|
||||
container_name: conpot_iec104
|
||||
restart: always
|
||||
depends_on:
|
||||
tpotinit:
|
||||
condition: service_healthy
|
||||
environment:
|
||||
- CONPOT_CONFIG=/etc/conpot/conpot.cfg
|
||||
- CONPOT_JSON_LOG=/var/log/conpot/conpot_IEC104.json
|
||||
- CONPOT_LOG=/var/log/conpot/conpot_IEC104.log
|
||||
- CONPOT_TEMPLATE=IEC104
|
||||
- CONPOT_TMP=/tmp/conpot
|
||||
tmpfs:
|
||||
- /tmp/conpot:uid=2000,gid=2000
|
||||
networks:
|
||||
- conpot_local_IEC104
|
||||
ports:
|
||||
- "161:161/udp"
|
||||
- "2404:2404"
|
||||
image: ${TPOT_REPO}/conpot:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
read_only: true
|
||||
volumes:
|
||||
- ${TPOT_DATA_PATH}/conpot/log:/var/log/conpot
|
||||
|
||||
# Conpot guardian_ast service
|
||||
conpot_guardian_ast:
|
||||
container_name: conpot_guardian_ast
|
||||
restart: always
|
||||
depends_on:
|
||||
tpotinit:
|
||||
condition: service_healthy
|
||||
environment:
|
||||
- CONPOT_CONFIG=/etc/conpot/conpot.cfg
|
||||
- CONPOT_JSON_LOG=/var/log/conpot/conpot_guardian_ast.json
|
||||
- CONPOT_LOG=/var/log/conpot/conpot_guardian_ast.log
|
||||
- CONPOT_TEMPLATE=guardian_ast
|
||||
- CONPOT_TMP=/tmp/conpot
|
||||
tmpfs:
|
||||
- /tmp/conpot:uid=2000,gid=2000
|
||||
networks:
|
||||
- conpot_local_guardian_ast
|
||||
ports:
|
||||
- "10001:10001"
|
||||
image: ${TPOT_REPO}/conpot:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
read_only: true
|
||||
volumes:
|
||||
- ${TPOT_DATA_PATH}/conpot/log:/var/log/conpot
|
||||
|
||||
# Conpot ipmi
|
||||
conpot_ipmi:
|
||||
container_name: conpot_ipmi
|
||||
restart: always
|
||||
depends_on:
|
||||
tpotinit:
|
||||
condition: service_healthy
|
||||
environment:
|
||||
- CONPOT_CONFIG=/etc/conpot/conpot.cfg
|
||||
- CONPOT_JSON_LOG=/var/log/conpot/conpot_ipmi.json
|
||||
- CONPOT_LOG=/var/log/conpot/conpot_ipmi.log
|
||||
- CONPOT_TEMPLATE=ipmi
|
||||
- CONPOT_TMP=/tmp/conpot
|
||||
tmpfs:
|
||||
- /tmp/conpot:uid=2000,gid=2000
|
||||
networks:
|
||||
- conpot_local_ipmi
|
||||
ports:
|
||||
- "623:623/udp"
|
||||
image: ${TPOT_REPO}/conpot:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
read_only: true
|
||||
volumes:
|
||||
- ${TPOT_DATA_PATH}/conpot/log:/var/log/conpot
|
||||
|
||||
# Conpot kamstrup_382
|
||||
conpot_kamstrup_382:
|
||||
container_name: conpot_kamstrup_382
|
||||
restart: always
|
||||
depends_on:
|
||||
tpotinit:
|
||||
condition: service_healthy
|
||||
environment:
|
||||
- CONPOT_CONFIG=/etc/conpot/conpot.cfg
|
||||
- CONPOT_JSON_LOG=/var/log/conpot/conpot_kamstrup_382.json
|
||||
- CONPOT_LOG=/var/log/conpot/conpot_kamstrup_382.log
|
||||
- CONPOT_TEMPLATE=kamstrup_382
|
||||
- CONPOT_TMP=/tmp/conpot
|
||||
tmpfs:
|
||||
- /tmp/conpot:uid=2000,gid=2000
|
||||
networks:
|
||||
- conpot_local_kamstrup_382
|
||||
ports:
|
||||
- "1025:1025"
|
||||
- "50100:50100"
|
||||
image: ${TPOT_REPO}/conpot:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
read_only: true
|
||||
volumes:
|
||||
- ${TPOT_DATA_PATH}/conpot/log:/var/log/conpot
|
||||
|
||||
# Cowrie service
|
||||
cowrie:
|
||||
container_name: cowrie
|
||||
restart: always
|
||||
depends_on:
|
||||
tpotinit:
|
||||
condition: service_healthy
|
||||
tmpfs:
|
||||
- /tmp/cowrie:uid=2000,gid=2000
|
||||
- /tmp/cowrie/data:uid=2000,gid=2000
|
||||
networks:
|
||||
- cowrie_local
|
||||
ports:
|
||||
- "22:22"
|
||||
- "23:23"
|
||||
image: ${TPOT_REPO}/cowrie:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
read_only: true
|
||||
volumes:
|
||||
- ${TPOT_DATA_PATH}/cowrie/downloads:/home/cowrie/cowrie/dl
|
||||
- ${TPOT_DATA_PATH}/cowrie/keys:/home/cowrie/cowrie/etc
|
||||
- ${TPOT_DATA_PATH}/cowrie/log:/home/cowrie/cowrie/log
|
||||
- ${TPOT_DATA_PATH}/cowrie/log/tty:/home/cowrie/cowrie/log/tty
|
||||
|
||||
# Dicompot service
|
||||
# Get the Horos Client for testing: https://horosproject.org/
|
||||
# Get Dicom images (CC BY 3.0): https://www.cancerimagingarchive.net/collections/
|
||||
# Put images (which must be in Dicom DCM format or it will not work!) into /data/dicompot/images
|
||||
dicompot:
|
||||
container_name: dicompot
|
||||
restart: always
|
||||
depends_on:
|
||||
tpotinit:
|
||||
condition: service_healthy
|
||||
networks:
|
||||
- dicompot_local
|
||||
ports:
|
||||
- "104:11112"
|
||||
- "11112:11112"
|
||||
image: ${TPOT_REPO}/dicompot:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
read_only: true
|
||||
volumes:
|
||||
- ${TPOT_DATA_PATH}/dicompot/log:/var/log/dicompot
|
||||
# - ${TPOT_DATA_PATH}/dicompot/images:/opt/dicompot/images
|
||||
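A minimal sketch of staging test images for the Dicompot service above, assuming the default ~/tpotce/data layout and that the commented images volume is enabled; the source path and file names are placeholders:

    mkdir -p ~/tpotce/data/dicompot/images
    cp ~/dicom-samples/*.dcm ~/tpotce/data/dicompot/images/   # files must be in DICOM DCM format
    # uncomment the images volume above, then restart the service
    docker compose up -d dicompot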
|
||||
# Dionaea service
|
||||
dionaea:
|
||||
container_name: dionaea
|
||||
stdin_open: true
|
||||
tty: true
|
||||
restart: always
|
||||
depends_on:
|
||||
tpotinit:
|
||||
condition: service_healthy
|
||||
networks:
|
||||
- dionaea_local
|
||||
ports:
|
||||
- "20:20"
|
||||
- "21:21"
|
||||
- "42:42"
|
||||
- "69:69/udp"
|
||||
- "81:81"
|
||||
- "135:135"
|
||||
# - "443:443"
|
||||
- "445:445"
|
||||
- "1433:1433"
|
||||
- "1723:1723"
|
||||
- "1883:1883"
|
||||
- "3306:3306"
|
||||
# - "5060:5060"
|
||||
# - "5060:5060/udp"
|
||||
# - "5061:5061"
|
||||
- "27017:27017"
|
||||
image: ${TPOT_REPO}/dionaea:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
read_only: true
|
||||
volumes:
|
||||
- ${TPOT_DATA_PATH}/dionaea/roots/ftp:/opt/dionaea/var/dionaea/roots/ftp
|
||||
- ${TPOT_DATA_PATH}/dionaea/roots/tftp:/opt/dionaea/var/dionaea/roots/tftp
|
||||
- ${TPOT_DATA_PATH}/dionaea/roots/www:/opt/dionaea/var/dionaea/roots/www
|
||||
- ${TPOT_DATA_PATH}/dionaea/roots/upnp:/opt/dionaea/var/dionaea/roots/upnp
|
||||
- ${TPOT_DATA_PATH}/dionaea:/opt/dionaea/var/dionaea
|
||||
- ${TPOT_DATA_PATH}/dionaea/binaries:/opt/dionaea/var/dionaea/binaries
|
||||
- ${TPOT_DATA_PATH}/dionaea/log:/opt/dionaea/var/log
|
||||
- ${TPOT_DATA_PATH}/dionaea/rtp:/opt/dionaea/var/dionaea/rtp
|
||||
|
||||
# ElasticPot service
|
||||
elasticpot:
|
||||
container_name: elasticpot
|
||||
restart: always
|
||||
depends_on:
|
||||
tpotinit:
|
||||
condition: service_healthy
|
||||
networks:
|
||||
- elasticpot_local
|
||||
ports:
|
||||
- "9200:9200"
|
||||
image: ${TPOT_REPO}/elasticpot:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
read_only: true
|
||||
volumes:
|
||||
- ${TPOT_DATA_PATH}/elasticpot/log:/opt/elasticpot/log
|
||||
|
||||
# H0neytr4p service
|
||||
h0neytr4p:
|
||||
container_name: h0neytr4p
|
||||
restart: always
|
||||
depends_on:
|
||||
tpotinit:
|
||||
condition: service_healthy
|
||||
networks:
|
||||
- h0neytr4p_local
|
||||
ports:
|
||||
- "443:443"
|
||||
# - "80:80"
|
||||
image: ${TPOT_REPO}/h0neytr4p:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
read_only: true
|
||||
volumes:
|
||||
- ${TPOT_DATA_PATH}/h0neytr4p/log/:/opt/h0neytr4p/log/
|
||||
- ${TPOT_DATA_PATH}/h0neytr4p/payloads/:/data/h0neytr4p/payloads/
|
||||
|
||||
# Heralding service
|
||||
heralding:
|
||||
container_name: heralding
|
||||
restart: always
|
||||
depends_on:
|
||||
tpotinit:
|
||||
condition: service_healthy
|
||||
tmpfs:
|
||||
- /tmp/heralding:uid=2000,gid=2000
|
||||
networks:
|
||||
- heralding_local
|
||||
ports:
|
||||
# - "21:21"
|
||||
# - "22:22"
|
||||
# - "23:23"
|
||||
# - "25:25"
|
||||
# - "80:80"
|
||||
- "110:110"
|
||||
- "143:143"
|
||||
# - "443:443"
|
||||
- "465:465"
|
||||
- "993:993"
|
||||
- "995:995"
|
||||
# - "3306:3306"
|
||||
# - "3389:3389"
|
||||
- "1080:1080"
|
||||
- "5432:5432"
|
||||
- "5900:5900"
|
||||
image: ${TPOT_REPO}/heralding:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
read_only: true
|
||||
volumes:
|
||||
- ${TPOT_DATA_PATH}/heralding/log:/var/log/heralding
|
||||
|
||||
# Honeyaml service
|
||||
honeyaml:
|
||||
container_name: honeyaml
|
||||
restart: always
|
||||
depends_on:
|
||||
tpotinit:
|
||||
condition: service_healthy
|
||||
networks:
|
||||
- honeyaml_local
|
||||
ports:
|
||||
- "3000:8080"
|
||||
image: ${TPOT_REPO}/honeyaml:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
read_only: true
|
||||
volumes:
|
||||
- ${TPOT_DATA_PATH}/honeyaml/log:/opt/honeyaml/log/
|
||||
|
||||
# Honeytrap service
|
||||
honeytrap:
|
||||
container_name: honeytrap
|
||||
restart: always
|
||||
depends_on:
|
||||
tpotinit:
|
||||
condition: service_healthy
|
||||
tmpfs:
|
||||
- /tmp/honeytrap:uid=2000,gid=2000
|
||||
network_mode: "host"
|
||||
cap_add:
|
||||
- NET_ADMIN
|
||||
image: ${TPOT_REPO}/honeytrap:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
read_only: true
|
||||
volumes:
|
||||
- ${TPOT_DATA_PATH}/honeytrap/attacks:/opt/honeytrap/var/attacks
|
||||
- ${TPOT_DATA_PATH}/honeytrap/downloads:/opt/honeytrap/var/downloads
|
||||
- ${TPOT_DATA_PATH}/honeytrap/log:/opt/honeytrap/var/log
|
||||
|
||||
# Ipphoney service
|
||||
ipphoney:
|
||||
container_name: ipphoney
|
||||
restart: always
|
||||
depends_on:
|
||||
tpotinit:
|
||||
condition: service_healthy
|
||||
networks:
|
||||
- ipphoney_local
|
||||
ports:
|
||||
- "631:631"
|
||||
image: ${TPOT_REPO}/ipphoney:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
read_only: true
|
||||
volumes:
|
||||
- ${TPOT_DATA_PATH}/ipphoney/log:/opt/ipphoney/log
|
||||
|
||||
# Mailoney service
|
||||
mailoney:
|
||||
container_name: mailoney
|
||||
stdin_open: true
|
||||
tty: true
|
||||
restart: always
|
||||
depends_on:
|
||||
tpotinit:
|
||||
condition: service_healthy
|
||||
networks:
|
||||
- mailoney_local
|
||||
ports:
|
||||
- "25:25"
|
||||
- "587:25"
|
||||
image: ${TPOT_REPO}/mailoney:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
read_only: true
|
||||
volumes:
|
||||
- ${TPOT_DATA_PATH}/mailoney/log:/opt/mailoney/logs
|
||||
|
||||
# Medpot service
|
||||
medpot:
|
||||
container_name: medpot
|
||||
restart: always
|
||||
depends_on:
|
||||
tpotinit:
|
||||
condition: service_healthy
|
||||
networks:
|
||||
- medpot_local
|
||||
ports:
|
||||
- "2575:2575"
|
||||
image: ${TPOT_REPO}/medpot:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
read_only: true
|
||||
volumes:
|
||||
- ${TPOT_DATA_PATH}/medpot/log/:/var/log/medpot
|
||||
|
||||
# Miniprint service
|
||||
miniprint:
|
||||
container_name: miniprint
|
||||
restart: always
|
||||
depends_on:
|
||||
tpotinit:
|
||||
condition: service_healthy
|
||||
networks:
|
||||
- miniprint_local
|
||||
ports:
|
||||
- "9100:9100"
|
||||
image: ${TPOT_REPO}/miniprint:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
read_only: true
|
||||
volumes:
|
||||
- ${TPOT_DATA_PATH}/miniprint/log/:/opt/miniprint/log/
|
||||
- ${TPOT_DATA_PATH}/miniprint/uploads/:/opt/miniprint/uploads/
|
||||
|
||||
# Redishoneypot service
|
||||
redishoneypot:
|
||||
container_name: redishoneypot
|
||||
restart: always
|
||||
depends_on:
|
||||
tpotinit:
|
||||
condition: service_healthy
|
||||
networks:
|
||||
- redishoneypot_local
|
||||
ports:
|
||||
- "6379:6379"
|
||||
image: ${TPOT_REPO}/redishoneypot:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
read_only: true
|
||||
volumes:
|
||||
- ${TPOT_DATA_PATH}/redishoneypot/log:/var/log/redishoneypot
|
||||
|
||||
# SentryPeer service
|
||||
sentrypeer:
|
||||
container_name: sentrypeer
|
||||
restart: always
|
||||
depends_on:
|
||||
tpotinit:
|
||||
condition: service_healthy
|
||||
# environment:
|
||||
# - SENTRYPEER_PEER_TO_PEER=1
|
||||
networks:
|
||||
- sentrypeer_local
|
||||
ports:
|
||||
# - "4222:4222/udp"
|
||||
- "5060:5060/tcp"
|
||||
- "5060:5060/udp"
|
||||
# - "127.0.0.1:8082:8082"
|
||||
image: ${TPOT_REPO}/sentrypeer:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
read_only: true
|
||||
volumes:
|
||||
- ${TPOT_DATA_PATH}/sentrypeer/log:/var/log/sentrypeer
|
||||
|
||||
#### Snare / Tanner
|
||||
## Tanner Redis Service
|
||||
tanner_redis:
|
||||
container_name: tanner_redis
|
||||
restart: always
|
||||
depends_on:
|
||||
tpotinit:
|
||||
condition: service_healthy
|
||||
tty: true
|
||||
networks:
|
||||
- tanner_local
|
||||
image: ${TPOT_REPO}/redis:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
read_only: true
|
||||
|
||||
## PHP Sandbox service
|
||||
tanner_phpox:
|
||||
container_name: tanner_phpox
|
||||
restart: always
|
||||
depends_on:
|
||||
tpotinit:
|
||||
condition: service_healthy
|
||||
tty: true
|
||||
networks:
|
||||
- tanner_local
|
||||
image: ${TPOT_REPO}/phpox:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
read_only: true
|
||||
|
||||
## Tanner API Service
|
||||
tanner_api:
|
||||
container_name: tanner_api
|
||||
restart: always
|
||||
depends_on:
|
||||
- tanner_redis
|
||||
tmpfs:
|
||||
- /tmp/tanner:uid=2000,gid=2000
|
||||
tty: true
|
||||
networks:
|
||||
- tanner_local
|
||||
image: ${TPOT_REPO}/tanner:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
read_only: true
|
||||
volumes:
|
||||
- ${TPOT_DATA_PATH}/tanner/log:/var/log/tanner
|
||||
command: tannerapi
|
||||
|
||||
## Tanner Service
|
||||
tanner:
|
||||
container_name: tanner
|
||||
restart: always
|
||||
depends_on:
|
||||
- tanner_api
|
||||
- tanner_phpox
|
||||
tmpfs:
|
||||
- /tmp/tanner:uid=2000,gid=2000
|
||||
tty: true
|
||||
networks:
|
||||
- tanner_local
|
||||
image: ${TPOT_REPO}/tanner:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
command: tanner
|
||||
read_only: true
|
||||
volumes:
|
||||
- ${TPOT_DATA_PATH}/tanner/log:/var/log/tanner
|
||||
- ${TPOT_DATA_PATH}/tanner/files:/opt/tanner/files
|
||||
|
||||
## Snare Service
|
||||
snare:
|
||||
container_name: snare
|
||||
restart: always
|
||||
depends_on:
|
||||
- tanner
|
||||
tty: true
|
||||
networks:
|
||||
- tanner_local
|
||||
ports:
|
||||
- "80:80"
|
||||
image: ${TPOT_REPO}/snare:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
|
||||
# Wordpot service
|
||||
wordpot:
|
||||
container_name: wordpot
|
||||
restart: always
|
||||
depends_on:
|
||||
tpotinit:
|
||||
condition: service_healthy
|
||||
networks:
|
||||
- wordpot_local
|
||||
ports:
|
||||
- "8080:80"
|
||||
image: ${TPOT_REPO}/wordpot:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
read_only: true
|
||||
volumes:
|
||||
- ${TPOT_DATA_PATH}/wordpot/log:/opt/wordpot/logs/
|
||||
|
||||
|
||||
##################
|
||||
#### NSM
|
||||
##################
|
||||
|
||||
# Fatt service
|
||||
fatt:
|
||||
container_name: fatt
|
||||
restart: always
|
||||
depends_on:
|
||||
tpotinit:
|
||||
condition: service_healthy
|
||||
network_mode: "host"
|
||||
cap_add:
|
||||
- NET_ADMIN
|
||||
- SYS_NICE
|
||||
- NET_RAW
|
||||
image: ${TPOT_REPO}/fatt:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
volumes:
|
||||
- ${TPOT_DATA_PATH}/fatt/log:/opt/fatt/log
|
||||
|
||||
# P0f service
|
||||
p0f:
|
||||
container_name: p0f
|
||||
restart: always
|
||||
depends_on:
|
||||
tpotinit:
|
||||
condition: service_healthy
|
||||
network_mode: "host"
|
||||
image: ${TPOT_REPO}/p0f:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
read_only: true
|
||||
volumes:
|
||||
- ${TPOT_DATA_PATH}/p0f/log:/var/log/p0f
|
||||
|
||||
# Suricata service
|
||||
suricata:
|
||||
container_name: suricata
|
||||
restart: always
|
||||
depends_on:
|
||||
tpotinit:
|
||||
condition: service_healthy
|
||||
environment:
|
||||
- OINKCODE=${OINKCODE:-OPEN} # Default to OPEN if unset or NULL (value provided by T-Pot .env)
|
||||
# Loading external Rules from URL
|
||||
# - FROMURL="https://username:password@yoururl.com|https://username:password@otherurl.com"
|
||||
network_mode: "host"
|
||||
cap_add:
|
||||
- NET_ADMIN
|
||||
- SYS_NICE
|
||||
- NET_RAW
|
||||
image: ${TPOT_REPO}/suricata:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
volumes:
|
||||
- ${TPOT_DATA_PATH}/suricata/log:/var/log/suricata
|
||||
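Before enabling the commented FROMURL example above, a quick reachability check of each rule source might look like the following; URL and credentials are placeholders only:

    curl -sfI "https://user:pass@rules.example.com/custom.rules" >/dev/null \
      && echo "rules URL reachable" \
      || echo "rules URL NOT reachable"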
|
||||
|
||||
##################
|
||||
#### Tools
|
||||
##################
|
||||
|
||||
#### ELK
|
||||
## Elasticsearch service
|
||||
elasticsearch:
|
||||
container_name: elasticsearch
|
||||
restart: always
|
||||
depends_on:
|
||||
tpotinit:
|
||||
condition: service_healthy
|
||||
networks:
|
||||
- nginx_local
|
||||
environment:
|
||||
- bootstrap.memory_lock=true
|
||||
- ES_JAVA_OPTS=-Xms2048m -Xmx2048m
|
||||
- ES_TMPDIR=/tmp
|
||||
cap_add:
|
||||
- IPC_LOCK
|
||||
ulimits:
|
||||
memlock:
|
||||
soft: -1
|
||||
hard: -1
|
||||
nofile:
|
||||
soft: 65536
|
||||
hard: 65536
|
||||
mem_limit: 4g
|
||||
ports:
|
||||
- "127.0.0.1:64298:9200"
|
||||
image: ${TPOT_REPO}/elasticsearch:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
volumes:
|
||||
- ${TPOT_DATA_PATH}:/data
|
||||
|
||||
## Kibana service
|
||||
kibana:
|
||||
container_name: kibana
|
||||
restart: always
|
||||
depends_on:
|
||||
elasticsearch:
|
||||
condition: service_healthy
|
||||
networks:
|
||||
- nginx_local
|
||||
mem_limit: 1g
|
||||
ports:
|
||||
- "127.0.0.1:64296:5601"
|
||||
image: ${TPOT_REPO}/kibana:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
|
||||
## Logstash service
|
||||
logstash:
|
||||
container_name: logstash
|
||||
restart: always
|
||||
depends_on:
|
||||
elasticsearch:
|
||||
condition: service_healthy
|
||||
networks:
|
||||
- nginx_local
|
||||
environment:
|
||||
- LS_JAVA_OPTS=-Xms1024m -Xmx1024m
|
||||
- TPOT_TYPE=${TPOT_TYPE:-HIVE}
|
||||
- TPOT_HIVE_USER=${TPOT_HIVE_USER}
|
||||
- TPOT_HIVE_IP=${TPOT_HIVE_IP}
|
||||
- LS_SSL_VERIFICATION=${LS_SSL_VERIFICATION:-full}
|
||||
ports:
|
||||
- "127.0.0.1:64305:64305"
|
||||
mem_limit: 2g
|
||||
image: ${TPOT_REPO}/logstash:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
volumes:
|
||||
- ${TPOT_DATA_PATH}:/data
|
||||
|
||||
## Map Redis Service
|
||||
map_redis:
|
||||
container_name: map_redis
|
||||
restart: always
|
||||
depends_on:
|
||||
tpotinit:
|
||||
condition: service_healthy
|
||||
networks:
|
||||
- nginx_local
|
||||
stop_signal: SIGKILL
|
||||
tty: true
|
||||
image: ${TPOT_REPO}/redis:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
read_only: true
|
||||
|
||||
## Map Web Service
|
||||
map_web:
|
||||
container_name: map_web
|
||||
restart: always
|
||||
depends_on:
|
||||
tpotinit:
|
||||
condition: service_healthy
|
||||
networks:
|
||||
- nginx_local
|
||||
environment:
|
||||
- MAP_COMMAND=AttackMapServer.py
|
||||
stop_signal: SIGKILL
|
||||
tty: true
|
||||
ports:
|
||||
- "127.0.0.1:64299:64299"
|
||||
image: ${TPOT_REPO}/map:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
|
||||
## Map Data Service
|
||||
map_data:
|
||||
container_name: map_data
|
||||
restart: always
|
||||
depends_on:
|
||||
elasticsearch:
|
||||
condition: service_healthy
|
||||
networks:
|
||||
- nginx_local
|
||||
environment:
|
||||
- MAP_COMMAND=DataServer_v2.py
|
||||
- TPOT_ATTACKMAP_TEXT=${TPOT_ATTACKMAP_TEXT}
|
||||
- TZ=${TPOT_ATTACKMAP_TEXT_TIMEZONE}
|
||||
stop_signal: SIGKILL
|
||||
tty: true
|
||||
image: ${TPOT_REPO}/map:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
#### /ELK
|
||||
|
||||
# Ewsposter service
|
||||
ewsposter:
|
||||
container_name: ewsposter
|
||||
restart: always
|
||||
depends_on:
|
||||
tpotinit:
|
||||
condition: service_healthy
|
||||
networks:
|
||||
- ewsposter_local
|
||||
environment:
|
||||
- EWS_HPFEEDS_ENABLE=false
|
||||
- EWS_HPFEEDS_HOST=host
|
||||
- EWS_HPFEEDS_PORT=port
|
||||
- EWS_HPFEEDS_CHANNELS=channels
|
||||
- EWS_HPFEEDS_IDENT=user
|
||||
- EWS_HPFEEDS_SECRET=secret
|
||||
- EWS_HPFEEDS_TLSCERT=false
|
||||
- EWS_HPFEEDS_FORMAT=json
|
||||
image: ${TPOT_REPO}/ewsposter:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
volumes:
|
||||
- ${TPOT_DATA_PATH}:/data
|
||||
- ${TPOT_DATA_PATH}/ews/conf/ews.ip:/opt/ewsposter/ews.ip
|
||||
|
||||
# Nginx service
|
||||
nginx:
|
||||
container_name: nginx
|
||||
restart: always
|
||||
environment:
|
||||
- TPOT_OSTYPE=${TPOT_OSTYPE}
|
||||
depends_on:
|
||||
tpotinit:
|
||||
condition: service_healthy
|
||||
tmpfs:
|
||||
- /var/tmp/nginx/client_body
|
||||
- /var/tmp/nginx/proxy
|
||||
- /var/tmp/nginx/fastcgi
|
||||
- /var/tmp/nginx/uwsgi
|
||||
- /var/tmp/nginx/scgi
|
||||
- /run
|
||||
- /var/lib/nginx/tmp:uid=100,gid=82
|
||||
networks:
|
||||
- nginx_local
|
||||
ports:
|
||||
- "64297:64297"
|
||||
- "64294:64294"
|
||||
image: ${TPOT_REPO}/nginx:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
read_only: true
|
||||
volumes:
|
||||
- ${TPOT_DATA_PATH}/nginx/cert/:/etc/nginx/cert/:ro
|
||||
- ${TPOT_DATA_PATH}/nginx/conf/nginxpasswd:/etc/nginx/nginxpasswd:ro
|
||||
- ${TPOT_DATA_PATH}/nginx/conf/lswebpasswd:/etc/nginx/lswebpasswd:ro
|
||||
- ${TPOT_DATA_PATH}/nginx/log/:/var/log/nginx/
|
||||
|
||||
# Spiderfoot service
|
||||
spiderfoot:
|
||||
container_name: spiderfoot
|
||||
restart: always
|
||||
depends_on:
|
||||
tpotinit:
|
||||
condition: service_healthy
|
||||
networks:
|
||||
- nginx_local
|
||||
ports:
|
||||
- "127.0.0.1:64303:8080"
|
||||
image: ${TPOT_REPO}/spiderfoot:${TPOT_VERSION}
|
||||
pull_policy: ${TPOT_PULL_POLICY}
|
||||
volumes:
|
||||
- ${TPOT_DATA_PATH}/spiderfoot:/home/spiderfoot/.spiderfoot
|
||||
|
|
@@ -1,23 +0,0 @@
|
|||
# T-Pot builder config file. Do not remove.
|
||||
|
||||
##########################
|
||||
# T-Pot Builder Settings #
|
||||
##########################
|
||||
|
||||
# docker compose .env
|
||||
TPOT_DOCKER_ENV=./.env
|
||||
|
||||
# Docker-Compose file
|
||||
TPOT_DOCKER_COMPOSE=./docker-compose.yml
|
||||
|
||||
# T-Pot Repos
|
||||
TPOT_DOCKER_REPO=dtagdevsec
|
||||
TPOT_GHCR_REPO=ghcr.io/telekom-security
|
||||
|
||||
# T-Pot Version Tag
|
||||
TPOT_VERSION=24.04.1
|
||||
|
||||
# T-Pot platforms (architectures)
|
||||
# Most docker features are available on linux
|
||||
TPOT_AMD64=linux/amd64
|
||||
TPOT_ARM64=linux/arm64
|
||||
|
|
@@ -1,202 +0,0 @@
|
|||
#!/usr/bin/env bash
|
||||
|
||||
# Got root?
|
||||
myWHOAMI=$(whoami)
|
||||
if [ "$myWHOAMI" != "root" ]
|
||||
then
|
||||
echo "Need to run as root ..."
|
||||
exit
|
||||
fi
|
||||
|
||||
# ANSI color codes for green (OK) and red (FAIL)
|
||||
GREEN='\033[0;32m'
|
||||
RED='\033[0;31m'
|
||||
NC='\033[0m' # No Color
|
||||
|
||||
# Default settings
|
||||
PUSH_IMAGES=false
|
||||
NO_CACHE=false
|
||||
PARALLELBUILDS=2
|
||||
UPLOAD_BANDWIDTH=40mbit # Set this to max 90% of available upload bandwidth
|
||||
INTERFACE=$(ip route | grep "^default" | awk '{ print $5 }')
|
||||
|
||||
# Help message
|
||||
usage() {
|
||||
echo "Usage: $0 [-p] [-n] [-h]"
|
||||
echo " -p Push images after building"
|
||||
echo " -n Build images with --no-cache"
|
||||
echo " -h Show help message"
|
||||
exit 1
|
||||
}
|
||||
|
||||
# Parse command-line options
|
||||
while getopts ":pnh" opt; do
|
||||
case ${opt} in
|
||||
p )
|
||||
PUSH_IMAGES=true
|
||||
docker login
|
||||
docker login ghcr.io
|
||||
;;
|
||||
n )
|
||||
NO_CACHE=true
|
||||
;;
|
||||
h )
|
||||
usage
|
||||
;;
|
||||
\? )
|
||||
echo "Invalid option: $OPTARG" 1>&2
|
||||
usage
|
||||
;;
|
||||
esac
|
||||
done
|
||||
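Typical invocations of this builder script, matching the -p/-n/-h flags parsed above (run as root, as the check at the top requires); the file name builder.sh is an assumption, since the diff header does not show it, and image names and registries come from the accompanying .env:

    sudo ./builder.sh            # build all images locally, using the build cache
    sudo ./builder.sh -n         # rebuild everything with --no-cache
    sudo ./builder.sh -n -p      # rebuild and push (prompts for docker and ghcr.io logins)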
|
||||
# Function to apply upload bandwidth limit using tc
|
||||
apply_bandwidth_limit() {
|
||||
echo -n "Applying upload bandwidth limit of $UPLOAD_BANDWIDTH on interface $INTERFACE..."
|
||||
if tc qdisc add dev $INTERFACE root tbf rate $UPLOAD_BANDWIDTH burst 32kbit latency 400ms >/dev/null 2>&1; then
|
||||
echo -e " [${GREEN}OK${NC}]"
|
||||
else
|
||||
echo -e " [${RED}FAIL${NC}]"
|
||||
remove_bandwidth_limit
|
||||
|
||||
# Try to reapply the limit
|
||||
echo -n "Reapplying upload bandwidth limit of $UPLOAD_BANDWIDTH on interface $INTERFACE..."
|
||||
if tc qdisc add dev $INTERFACE root tbf rate $UPLOAD_BANDWIDTH burst 32kbit latency 400ms >/dev/null 2>&1; then
|
||||
echo -e " [${GREEN}OK${NC}]"
|
||||
else
|
||||
echo -e " [${RED}FAIL${NC}]"
|
||||
echo "Failed to apply bandwidth limit on $INTERFACE. Exiting."
|
||||
echo
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
}
|
||||
|
||||
# Function to check if the bandwidth limit is set
|
||||
is_bandwidth_limit_set() {
|
||||
tc qdisc show dev $INTERFACE | grep -q 'tbf'
|
||||
}
|
||||
|
||||
# Function to remove the bandwidth limit using tc if it is set
|
||||
remove_bandwidth_limit() {
|
||||
if is_bandwidth_limit_set; then
|
||||
echo -n "Removing upload bandwidth limit on interface $INTERFACE..."
|
||||
if tc qdisc del dev $INTERFACE root; then
|
||||
echo -e " [${GREEN}OK${NC}]"
|
||||
else
|
||||
echo -e " [${RED}FAIL${NC}]"
|
||||
fi
|
||||
fi
|
||||
}
|
||||
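The tc calls wrapped by the two functions above can also be run by hand to inspect or clear a leftover limit, for example after an unexpected abort; the interface name here is a placeholder:

    tc qdisc show dev eth0           # a 'tbf' entry means the limit is still active
    sudo tc qdisc del dev eth0 root  # remove it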
|
||||
echo "###########################"
|
||||
echo "# T-Pot Image Builder"
|
||||
echo "###########################"
|
||||
echo
|
||||
|
||||
# Check if 'mybuilder' exists, and ensure it's running with bootstrap
|
||||
echo -n "Checking if buildx builder 'mybuilder' exists and is running..."
|
||||
if ! docker buildx inspect mybuilder --bootstrap >/dev/null 2>&1; then
|
||||
echo
|
||||
echo -n " Creating and starting buildx builder 'mybuilder'..."
|
||||
if docker buildx create --name mybuilder --driver docker-container --use >/dev/null 2>&1 && \
|
||||
docker buildx inspect mybuilder --bootstrap >/dev/null 2>&1; then
|
||||
echo -e " [${GREEN}OK${NC}]"
|
||||
else
|
||||
echo -e " [${RED}FAIL${NC}]"
|
||||
exit 1
|
||||
fi
|
||||
else
|
||||
echo -e " [${GREEN}OK${NC}]"
|
||||
fi
|
||||
|
||||
# Ensure arm64 and amd64 platforms are active
|
||||
echo -n "Ensuring 'mybuilder' supports linux/arm64 and linux/amd64..."
|
||||
|
||||
# Get active platforms from buildx
|
||||
active_platforms=$(docker buildx inspect mybuilder --bootstrap | grep -oP '(?<=Platforms: ).*')
|
||||
|
||||
if [[ "$active_platforms" == *"linux/arm64"* && "$active_platforms" == *"linux/amd64"* ]]; then
|
||||
echo -e " [${GREEN}OK${NC}]"
|
||||
else
|
||||
echo
|
||||
echo -n " Enabling platforms linux/arm64 and linux/amd64..."
|
||||
if docker buildx create --name mybuilder --driver docker-container --use --platform linux/amd64,linux/arm64 >/dev/null 2>&1 && \
|
||||
docker buildx inspect mybuilder --bootstrap >/dev/null 2>&1; then
|
||||
echo -e " [${GREEN}OK${NC}]"
|
||||
else
|
||||
echo -e " [${RED}FAIL${NC}]"
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
|
||||
# Ensure QEMU is set up for cross-platform builds
|
||||
echo -n "Ensuring QEMU is configured for cross-platform builds..."
|
||||
if docker run --rm --privileged tonistiigi/binfmt --install all > /dev/null 2>&1; then
|
||||
echo -e " [${GREEN}OK${NC}]"
|
||||
else
|
||||
echo -e " [${RED}FAIL${NC}]"
|
||||
fi
|
||||
|
||||
# Apply bandwidth limit only if pushing images
|
||||
if $PUSH_IMAGES; then
|
||||
echo
|
||||
echo "########################################"
|
||||
echo "# Setting Upload Bandwidth limit ..."
|
||||
echo "########################################"
|
||||
echo
|
||||
apply_bandwidth_limit
|
||||
fi
|
||||
|
||||
# Trap to ensure bandwidth limit is removed on script error, exit
|
||||
trap_cleanup() {
|
||||
if is_bandwidth_limit_set; then
|
||||
remove_bandwidth_limit
|
||||
fi
|
||||
}
|
||||
trap trap_cleanup INT ERR EXIT
|
||||
|
||||
echo
|
||||
echo "################################"
|
||||
echo "# Now building images ..."
|
||||
echo "################################"
|
||||
echo
|
||||
|
||||
mkdir -p log
|
||||
|
||||
# List of services to build
|
||||
services=$(docker compose config --services | sort)
|
||||
|
||||
# Loop through each service to build
|
||||
echo $services | tr ' ' '\n' | xargs -I {} -P $PARALLELBUILDS bash -c '
|
||||
echo "Building image: {}" && \
|
||||
build_cmd="docker compose build {}" && \
|
||||
if '$PUSH_IMAGES'; then \
|
||||
build_cmd="$build_cmd --push"; \
|
||||
fi && \
|
||||
if '$NO_CACHE'; then \
|
||||
build_cmd="$build_cmd --no-cache"; \
|
||||
fi && \
|
||||
eval "$build_cmd 2>&1 > log/{}.log" && \
|
||||
echo -e "Image {}: ['$GREEN'OK'$NC']" || \
|
||||
echo -e "Image {}: ['$RED'FAIL'$NC']"
|
||||
'
|
||||
|
||||
# Remove bandwidth limit if it was applied
|
||||
if is_bandwidth_limit_set; then
|
||||
echo
|
||||
echo "########################################"
|
||||
echo "# Removiong Upload Bandwidth limit ..."
|
||||
echo "########################################"
|
||||
echo
|
||||
remove_bandwidth_limit
|
||||
fi
|
||||
|
||||
echo
|
||||
echo "#######################################################"
|
||||
echo "# Done."
|
||||
if ! "$PUSH_IMAGES"; then
|
||||
echo "# Remeber to push the images using push option."
|
||||
fi
|
||||
echo "#######################################################"
|
||||
echo
|
||||
|
|
@@ -1,421 +0,0 @@
|
|||
# T-Pot Docker Compose Image Builder (use only for building docker images)
|
||||
# Settings in .env
|
||||
|
||||
##################
|
||||
#### Anchors
|
||||
##################
|
||||
|
||||
# Common build config
|
||||
x-common-build: &common-build
|
||||
dockerfile: ./Dockerfile
|
||||
platforms:
|
||||
- ${TPOT_AMD64}
|
||||
- ${TPOT_ARM64}
|
||||
|
||||
services:
|
||||
|
||||
##################
|
||||
#### Honeypots
|
||||
##################
|
||||
|
||||
# Adbhoney
|
||||
adbhoney:
|
||||
image: ${TPOT_DOCKER_REPO}/adbhoney:${TPOT_VERSION}
|
||||
build:
|
||||
tags:
|
||||
- ${TPOT_GHCR_REPO}/adbhoney:${TPOT_VERSION}
|
||||
context: ../adbhoney/
|
||||
<<: *common-build
|
||||
|
||||
# Beelzebub
|
||||
beelzebub:
|
||||
image: ${TPOT_DOCKER_REPO}/beelzebub:${TPOT_VERSION}
|
||||
build:
|
||||
tags:
|
||||
- ${TPOT_GHCR_REPO}/beelzebub:${TPOT_VERSION}
|
||||
context: ../beelzebub/
|
||||
<<: *common-build
|
||||
|
||||
# Ciscoasa
|
||||
ciscoasa:
|
||||
image: ${TPOT_DOCKER_REPO}/ciscoasa:${TPOT_VERSION}
|
||||
build:
|
||||
tags:
|
||||
- ${TPOT_GHCR_REPO}/ciscoasa:${TPOT_VERSION}
|
||||
context: ../ciscoasa/
|
||||
<<: *common-build
|
||||
|
||||
# Citrixhoneypot
|
||||
citrixhoneypot:
|
||||
image: ${TPOT_DOCKER_REPO}/citrixhoneypot:${TPOT_VERSION}
|
||||
build:
|
||||
tags:
|
||||
- ${TPOT_GHCR_REPO}/citrixhoneypot:${TPOT_VERSION}
|
||||
context: ../citrixhoneypot/
|
||||
<<: *common-build
|
||||
|
||||
# Conpot
|
||||
conpot:
|
||||
image: ${TPOT_DOCKER_REPO}/conpot:${TPOT_VERSION}
|
||||
build:
|
||||
tags:
|
||||
- ${TPOT_GHCR_REPO}/conpot:${TPOT_VERSION}
|
||||
context: ../conpot/
|
||||
<<: *common-build
|
||||
|
||||
# Cowrie
|
||||
cowrie:
|
||||
image: ${TPOT_DOCKER_REPO}/cowrie:${TPOT_VERSION}
|
||||
build:
|
||||
tags:
|
||||
- ${TPOT_GHCR_REPO}/cowrie:${TPOT_VERSION}
|
||||
context: ../cowrie/
|
||||
<<: *common-build
|
||||
|
||||
# Ddospot
|
||||
ddospot:
|
||||
image: ${TPOT_DOCKER_REPO}/ddospot:${TPOT_VERSION}
|
||||
build:
|
||||
tags:
|
||||
- ${TPOT_GHCR_REPO}/ddospot:${TPOT_VERSION}
|
||||
context: ../ddospot/
|
||||
<<: *common-build
|
||||
|
||||
# Dicompot
|
||||
dicompot:
|
||||
image: ${TPOT_DOCKER_REPO}/dicompot:${TPOT_VERSION}
|
||||
build:
|
||||
tags:
|
||||
- ${TPOT_GHCR_REPO}/dicompot:${TPOT_VERSION}
|
||||
context: ../dicompot/
|
||||
<<: *common-build
|
||||
|
||||
# Dionaea
|
||||
dionaea:
|
||||
image: ${TPOT_DOCKER_REPO}/dionaea:${TPOT_VERSION}
|
||||
build:
|
||||
tags:
|
||||
- ${TPOT_GHCR_REPO}/dionaea:${TPOT_VERSION}
|
||||
context: ../dionaea/
|
||||
<<: *common-build
|
||||
|
||||
# Elasticpot
|
||||
elasticpot:
|
||||
image: ${TPOT_DOCKER_REPO}/elasticpot:${TPOT_VERSION}
|
||||
build:
|
||||
tags:
|
||||
- ${TPOT_GHCR_REPO}/elasticpot:${TPOT_VERSION}
|
||||
context: ../elasticpot/
|
||||
<<: *common-build
|
||||
|
||||
# Endlessh
|
||||
endlessh:
|
||||
image: ${TPOT_DOCKER_REPO}/endlessh:${TPOT_VERSION}
|
||||
build:
|
||||
tags:
|
||||
- ${TPOT_GHCR_REPO}/endlessh:${TPOT_VERSION}
|
||||
context: ../endlessh/
|
||||
<<: *common-build
|
||||
|
||||
# Galah
|
||||
galah:
|
||||
image: ${TPOT_DOCKER_REPO}/galah:${TPOT_VERSION}
|
||||
build:
|
||||
tags:
|
||||
- ${TPOT_GHCR_REPO}/galah:${TPOT_VERSION}
|
||||
context: ../galah/
|
||||
<<: *common-build
|
||||
|
||||
# Glutton
|
||||
glutton:
|
||||
image: ${TPOT_DOCKER_REPO}/glutton:${TPOT_VERSION}
|
||||
build:
|
||||
tags:
|
||||
- ${TPOT_GHCR_REPO}/glutton:${TPOT_VERSION}
|
||||
context: ../glutton/
|
||||
<<: *common-build
|
||||
|
||||
# Go-pot
|
||||
go-pot:
|
||||
image: ${TPOT_DOCKER_REPO}/go-pot:${TPOT_VERSION}
|
||||
build:
|
||||
tags:
|
||||
- ${TPOT_GHCR_REPO}/go-pot:${TPOT_VERSION}
|
||||
context: ../go-pot/
|
||||
<<: *common-build
|
||||
|
||||
# H0neytr4p
|
||||
h0neytr4p:
|
||||
image: ${TPOT_DOCKER_REPO}/h0neytr4p:${TPOT_VERSION}
|
||||
build:
|
||||
tags:
|
||||
- ${TPOT_GHCR_REPO}/h0neytr4p:${TPOT_VERSION}
|
||||
context: ../h0neytr4p/
|
||||
<<: *common-build
|
||||
|
||||
# Hellpot
|
||||
hellpot:
|
||||
image: ${TPOT_DOCKER_REPO}/hellpot:${TPOT_VERSION}
|
||||
build:
|
||||
tags:
|
||||
- ${TPOT_GHCR_REPO}/hellpot:${TPOT_VERSION}
|
||||
context: ../hellpot/
|
||||
<<: *common-build
|
||||
|
||||
# Heralding
|
||||
heralding:
|
||||
image: ${TPOT_DOCKER_REPO}/heralding:${TPOT_VERSION}
|
||||
build:
|
||||
tags:
|
||||
- ${TPOT_GHCR_REPO}/heralding:${TPOT_VERSION}
|
||||
context: ../heralding/
|
||||
<<: *common-build
|
||||
|
||||
# Honeyaml
|
||||
honeyaml:
|
||||
image: ${TPOT_DOCKER_REPO}/honeyaml:${TPOT_VERSION}
|
||||
build:
|
||||
tags:
|
||||
- ${TPOT_GHCR_REPO}/honeyaml:${TPOT_VERSION}
|
||||
context: ../honeyaml/
|
||||
<<: *common-build
|
||||
|
||||
# Honeypots
|
||||
honeypots:
|
||||
image: ${TPOT_DOCKER_REPO}/honeypots:${TPOT_VERSION}
|
||||
build:
|
||||
tags:
|
||||
- ${TPOT_GHCR_REPO}/honeypots:${TPOT_VERSION}
|
||||
context: ../honeypots/
|
||||
<<: *common-build
|
||||
|
||||
# Honeytrap
|
||||
honeytrap:
|
||||
image: ${TPOT_DOCKER_REPO}/honeytrap:${TPOT_VERSION}
|
||||
build:
|
||||
tags:
|
||||
- ${TPOT_GHCR_REPO}/honeytrap:${TPOT_VERSION}
|
||||
context: ../honeytrap/
|
||||
<<: *common-build
|
||||
|
||||
# Ipphoney
|
||||
ipphoney:
|
||||
image: ${TPOT_DOCKER_REPO}/ipphoney:${TPOT_VERSION}
|
||||
build:
|
||||
tags:
|
||||
- ${TPOT_GHCR_REPO}/ipphoney:${TPOT_VERSION}
|
||||
context: ../ipphoney/
|
||||
<<: *common-build
|
||||
|
||||
# Log4pot
|
||||
log4pot:
|
||||
image: ${TPOT_DOCKER_REPO}/log4pot:${TPOT_VERSION}
|
||||
build:
|
||||
tags:
|
||||
- ${TPOT_GHCR_REPO}/log4pot:${TPOT_VERSION}
|
||||
context: ../log4pot/
|
||||
<<: *common-build
|
||||
|
||||
# Mailoney
|
||||
mailoney:
|
||||
image: ${TPOT_DOCKER_REPO}/mailoney:${TPOT_VERSION}
|
||||
build:
|
||||
tags:
|
||||
- ${TPOT_GHCR_REPO}/mailoney:${TPOT_VERSION}
|
||||
context: ../mailoney/
|
||||
<<: *common-build
|
||||
|
||||
# Medpot
|
||||
medpot:
|
||||
image: ${TPOT_DOCKER_REPO}/medpot:${TPOT_VERSION}
|
||||
build:
|
||||
tags:
|
||||
- ${TPOT_GHCR_REPO}/medpot:${TPOT_VERSION}
|
||||
context: ../medpot/
|
||||
<<: *common-build
|
||||
|
||||
# Miniprint
|
||||
miniprint:
|
||||
image: ${TPOT_DOCKER_REPO}/miniprint:${TPOT_VERSION}
|
||||
build:
|
||||
tags:
|
||||
- ${TPOT_GHCR_REPO}/miniprint:${TPOT_VERSION}
|
||||
context: ../miniprint/
|
||||
<<: *common-build
|
||||
|
||||
# Redishoneypot
|
||||
redishoneypot:
|
||||
image: ${TPOT_DOCKER_REPO}/redishoneypot:${TPOT_VERSION}
|
||||
build:
|
||||
tags:
|
||||
- ${TPOT_GHCR_REPO}/redishoneypot:${TPOT_VERSION}
|
||||
context: ../redishoneypot/
|
||||
<<: *common-build
|
||||
|
||||
# Sentrypeer
|
||||
sentrypeer:
|
||||
image: ${TPOT_DOCKER_REPO}/sentrypeer:${TPOT_VERSION}
|
||||
build:
|
||||
tags:
|
||||
- ${TPOT_GHCR_REPO}/sentrypeer:${TPOT_VERSION}
|
||||
context: ../sentrypeer/
|
||||
<<: *common-build
|
||||
|
||||
#### Snare / Tanner
|
||||
## Tanner Redis
|
||||
redis:
|
||||
image: ${TPOT_DOCKER_REPO}/redis:${TPOT_VERSION}
|
||||
build:
|
||||
tags:
|
||||
- ${TPOT_GHCR_REPO}/redis:${TPOT_VERSION}
|
||||
context: ../tanner/redis/
|
||||
<<: *common-build
|
||||
|
||||
## PHP Sandbox
|
||||
phpox:
|
||||
image: ${TPOT_DOCKER_REPO}/phpox:${TPOT_VERSION}
|
||||
build:
|
||||
tags:
|
||||
- ${TPOT_GHCR_REPO}/phpox:${TPOT_VERSION}
|
||||
context: ../tanner/phpox/
|
||||
<<: *common-build
|
||||
|
||||
## Tanner
|
||||
tanner:
|
||||
image: ${TPOT_DOCKER_REPO}/tanner:${TPOT_VERSION}
|
||||
build:
|
||||
tags:
|
||||
- ${TPOT_GHCR_REPO}/tanner:${TPOT_VERSION}
|
||||
context: ../tanner/tanner/
|
||||
<<: *common-build
|
||||
|
||||
## Snare
|
||||
snare:
|
||||
image: ${TPOT_DOCKER_REPO}/snare:${TPOT_VERSION}
|
||||
build:
|
||||
tags:
|
||||
- ${TPOT_GHCR_REPO}/snare:${TPOT_VERSION}
|
||||
context: ../tanner/snare/
|
||||
<<: *common-build
|
||||
####
|
||||
|
||||
# Wordpot
|
||||
wordpot:
|
||||
image: ${TPOT_DOCKER_REPO}/wordpot:${TPOT_VERSION}
|
||||
build:
|
||||
tags:
|
||||
- ${TPOT_GHCR_REPO}/wordpot:${TPOT_VERSION}
|
||||
context: ../wordpot/
|
||||
<<: *common-build
|
||||
|
||||
|
||||
##################
|
||||
#### NSM
|
||||
##################
|
||||
|
||||
# Fatt
|
||||
fatt:
|
||||
image: ${TPOT_DOCKER_REPO}/fatt:${TPOT_VERSION}
|
||||
build:
|
||||
tags:
|
||||
- ${TPOT_GHCR_REPO}/fatt:${TPOT_VERSION}
|
||||
context: ../fatt/
|
||||
<<: *common-build
|
||||
|
||||
# P0f
|
||||
p0f:
|
||||
image: ${TPOT_DOCKER_REPO}/p0f:${TPOT_VERSION}
|
||||
build:
|
||||
tags:
|
||||
- ${TPOT_GHCR_REPO}/p0f:${TPOT_VERSION}
|
||||
context: ../p0f/
|
||||
<<: *common-build
|
||||
|
||||
# Suricata
|
||||
suricata:
|
||||
image: ${TPOT_DOCKER_REPO}/suricata:${TPOT_VERSION}
|
||||
build:
|
||||
tags:
|
||||
- ${TPOT_GHCR_REPO}/suricata:${TPOT_VERSION}
|
||||
context: ../suricata/
|
||||
<<: *common-build
|
||||
|
||||
|
||||
##################
|
||||
#### Tools
|
||||
##################
|
||||
|
||||
# T-Pot Init
|
||||
tpotinit:
|
||||
image: ${TPOT_DOCKER_REPO}/tpotinit:${TPOT_VERSION}
|
||||
build:
|
||||
tags:
|
||||
- ${TPOT_GHCR_REPO}/tpotinit:${TPOT_VERSION}
|
||||
context: ../tpotinit/
|
||||
<<: *common-build
|
||||
|
||||
#### ELK
|
||||
## Elasticsearch
|
||||
elasticsearch:
|
||||
image: ${TPOT_DOCKER_REPO}/elasticsearch:${TPOT_VERSION}
|
||||
build:
|
||||
tags:
|
||||
- ${TPOT_GHCR_REPO}/elasticsearch:${TPOT_VERSION}
|
||||
context: ../elk/elasticsearch/
|
||||
<<: *common-build
|
||||
|
||||
## Kibana
|
||||
kibana:
|
||||
image: ${TPOT_DOCKER_REPO}/kibana:${TPOT_VERSION}
|
||||
build:
|
||||
tags:
|
||||
- ${TPOT_GHCR_REPO}/kibana:${TPOT_VERSION}
|
||||
context: ../elk/kibana/
|
||||
<<: *common-build
|
||||
|
||||
## Logstash
|
||||
logstash:
|
||||
image: ${TPOT_DOCKER_REPO}/logstash:${TPOT_VERSION}
|
||||
build:
|
||||
tags:
|
||||
- ${TPOT_GHCR_REPO}/logstash:${TPOT_VERSION}
|
||||
context: ../elk/logstash/
|
||||
<<: *common-build
|
||||
|
||||
## Map Web
|
||||
map:
|
||||
image: ${TPOT_DOCKER_REPO}/map:${TPOT_VERSION}
|
||||
build:
|
||||
tags:
|
||||
- ${TPOT_GHCR_REPO}/map:${TPOT_VERSION}
|
||||
context: ../elk/map/
|
||||
<<: *common-build
|
||||
####
|
||||
|
||||
# Ewsposter
|
||||
ewsposter:
|
||||
image: ${TPOT_DOCKER_REPO}/ewsposter:${TPOT_VERSION}
|
||||
build:
|
||||
tags:
|
||||
- ${TPOT_GHCR_REPO}/ewsposter:${TPOT_VERSION}
|
||||
context: ../ewsposter/
|
||||
<<: *common-build
|
||||
|
||||
# Nginx
|
||||
nginx:
|
||||
image: ${TPOT_DOCKER_REPO}/nginx:${TPOT_VERSION}
|
||||
build:
|
||||
tags:
|
||||
- ${TPOT_GHCR_REPO}/nginx:${TPOT_VERSION}
|
||||
context: ../nginx/
|
||||
<<: *common-build
|
||||
|
||||
# Spiderfoot
|
||||
spiderfoot:
|
||||
image: ${TPOT_DOCKER_REPO}/spiderfoot:${TPOT_VERSION}
|
||||
build:
|
||||
tags:
|
||||
- ${TPOT_GHCR_REPO}/spiderfoot:${TPOT_VERSION}
|
||||
context: ../spiderfoot/
|
||||
<<: *common-build
|
||||
|
||||
|
|
@@ -1,98 +0,0 @@
|
|||
#!/usr/bin/env bash
|
||||
|
||||
# ANSI color codes for green (OK) and red (FAIL)
|
||||
BLUE='\033[0;34m'
|
||||
GREEN='\033[0;32m'
|
||||
RED='\033[0;31m'
|
||||
NC='\033[0m' # No Color
|
||||
|
||||
# Check if the user is in the docker group
|
||||
if ! groups $(whoami) | grep &>/dev/null '\bdocker\b'; then
|
||||
echo -e "${RED}You need to be in the docker group to run this script without root privileges.${NC}"
|
||||
echo "Please run the following command to add yourself to the docker group:"
|
||||
echo " sudo usermod -aG docker $(whoami)"
|
||||
echo "Then log out and log back in or run the script with sudo."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Command-line switch check
|
||||
if [ "$1" != "-y" ]; then
|
||||
echo "### Setting up Docker for Multi-Arch Builds."
|
||||
echo "### Requires Docker packages from https://get.docker.com/"
|
||||
echo "### Run with -y if you meet the requirements!"
|
||||
exit 0
|
||||
fi
|
||||
|
||||
# Check if the mybuilder exists and is running
|
||||
echo -n "Checking if buildx builder 'mybuilder' exists and is running..."
|
||||
if ! docker buildx inspect mybuilder --bootstrap >/dev/null 2>&1; then
|
||||
echo
|
||||
echo -n " Creating and starting buildx builder 'mybuilder'..."
|
||||
if docker buildx create --name mybuilder --driver docker-container --use >/dev/null 2>&1 && \
|
||||
docker buildx inspect mybuilder --bootstrap >/dev/null 2>&1; then
|
||||
echo -e " [${GREEN}OK${NC}]"
|
||||
else
|
||||
echo -e " [${RED}FAIL${NC}]"
|
||||
exit 1
|
||||
fi
|
||||
else
|
||||
echo -e " [${GREEN}OK${NC}]"
|
||||
fi
|
||||
|
||||
# Ensure QEMU is set up for cross-platform builds
|
||||
echo -n "Ensuring QEMU is configured for cross-platform builds..."
|
||||
if docker run --rm --privileged tonistiigi/binfmt --install all >/dev/null 2>&1; then
|
||||
echo -e " [${GREEN}OK${NC}]"
|
||||
else
|
||||
echo -e " [${RED}FAIL${NC}]"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Ensure arm64 and amd64 platforms are active
|
||||
echo -n "Ensuring 'mybuilder' supports linux/arm64 and linux/amd64..."
|
||||
active_platforms=$(docker buildx inspect mybuilder --bootstrap | grep -oP '(?<=Platforms: ).*')
|
||||
|
||||
if [[ "$active_platforms" == *"linux/arm64"* && "$active_platforms" == *"linux/amd64"* ]]; then
|
||||
echo -e " [${GREEN}OK${NC}]"
|
||||
else
|
||||
echo
|
||||
echo -n " Enabling platforms linux/arm64 and linux/amd64..."
|
||||
if docker buildx create --name mybuilder --driver docker-container --use --platform linux/amd64,linux/arm64 >/dev/null 2>&1 && \
|
||||
docker buildx inspect mybuilder --bootstrap >/dev/null 2>&1; then
|
||||
echo -e " [${GREEN}OK${NC}]"
|
||||
else
|
||||
echo -e " [${RED}FAIL${NC}]"
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
|
||||
echo
|
||||
echo -e "${BLUE}### Done.${NC}"
|
||||
echo
|
||||
echo -e "${BLUE}Examples:${NC}"
|
||||
echo -e " ${BLUE}Manual multi-arch build:${NC}"
|
||||
echo " docker buildx build --platform linux/amd64,linux/arm64 -t username/demo:latest --push ."
|
||||
echo
|
||||
echo -e " ${BLUE}Documentation:${NC} https://docs.docker.com/desktop/multi-arch/"
|
||||
echo
|
||||
echo -e " ${BLUE}Build release with Docker Compose:${NC}"
|
||||
echo " docker compose build"
|
||||
echo
|
||||
echo -e " ${BLUE}Build and push release with Docker Compose:${NC}"
|
||||
echo " docker compose build --push"
|
||||
echo
|
||||
echo -e " ${BLUE}Build a single image with Docker Compose:${NC}"
|
||||
echo " docker compose build tpotinit"
|
||||
echo
|
||||
echo -e " ${BLUE}Build and push a single image with Docker Compose:${NC}"
|
||||
echo " docker compose build tpotinit --push"
|
||||
echo
|
||||
echo -e "${BLUE}Resolve buildx issues:${NC}"
|
||||
echo " docker buildx create --use --name mybuilder"
|
||||
echo " docker buildx inspect mybuilder --bootstrap"
|
||||
echo " docker login -u <username>"
|
||||
echo " docker login ghcr.io -u <username>"
|
||||
echo
|
||||
echo -e "${BLUE}Fix segmentation faults when building arm64 images:${NC}"
|
||||
echo " docker buildx rm mybuilder && docker run --rm --privileged tonistiigi/binfmt --install all"
|
||||
echo
|
||||
|
|
@@ -1,35 +0,0 @@
|
|||
FROM alpine:3.20 AS builder
|
||||
#
|
||||
# Include dist
|
||||
COPY dist/ /root/dist/
|
||||
#
|
||||
# Install packages
|
||||
RUN apk --no-cache -U upgrade && \
|
||||
apk --no-cache -U add \
|
||||
build-base \
|
||||
git \
|
||||
procps \
|
||||
py3-psutil \
|
||||
py3-requests \
|
||||
py3-pip \
|
||||
python3 && \
|
||||
pip3 install --break-system-packages pyinstaller && \
|
||||
#
|
||||
# Install adbhoney from git
|
||||
git clone https://github.com/t3chn0m4g3/ADBHoney /opt/adbhoney && \
|
||||
cd /opt/adbhoney && \
|
||||
git checkout 42a73cd8a82ddd4d137de70ac37b1a8b2e3e0119 && \
|
||||
cp /root/dist/adbhoney.cfg /opt/adbhoney && \
|
||||
sed -i 's/dst_ip/dest_ip/' /opt/adbhoney/adbhoney/core.py && \
|
||||
sed -i 's/dst_port/dest_port/' /opt/adbhoney/adbhoney/core.py && \
|
||||
pyinstaller adbhoney.spec
|
||||
#
|
||||
FROM alpine:3.20
|
||||
RUN apk --no-cache -U upgrade
|
||||
COPY --from=builder /opt/adbhoney/dist/adbhoney/ /opt/adbhoney/
|
||||
#
|
||||
# Set workdir and start adbhoney
|
||||
STOPSIGNAL SIGINT
|
||||
USER 2000:2000
|
||||
WORKDIR /opt/adbhoney/
|
||||
CMD ["./adbhoney"]
|
||||
21  docker/adbhoney/dist/adbhoney.cfg  (vendored, removed)
@@ -1,21 +0,0 @@
|
|||
[honeypot]
|
||||
hostname = honeypot01
|
||||
|
||||
address = 0.0.0.0
|
||||
port = 5555
|
||||
http_download = true
|
||||
http_timeout = 45
|
||||
|
||||
download_dir = dl/
|
||||
log_dir = log/
|
||||
|
||||
device_id = device::http://ro.product.name =starltexx;ro.product.model=SM-G960F;ro.product.device=starlte;features=cmd,stat_v2,shell_v2
|
||||
|
||||
[output_log]
|
||||
enabled = true
|
||||
log_file = adbhoney.log
|
||||
log_level = info
|
||||
|
||||
[output_json]
|
||||
enabled = true
|
||||
log_file = adbhoney.json
|
||||
42  docker/adbhoney/dist/cpu_check.py  (vendored, removed)
@@ -1,42 +0,0 @@
|
|||
import psutil
|
||||
import sys
|
||||
import time
|
||||
|
||||
if len(sys.argv) != 3:
|
||||
print("Usage: cpu_check.py <PID> <CPU_USAGE_THRESHOLD>")
|
||||
sys.exit(1)
|
||||
|
||||
try:
|
||||
pid = int(sys.argv[1])
|
||||
except ValueError:
|
||||
print("Please provide a valid integer value for the PID.")
|
||||
sys.exit(1)
|
||||
|
||||
try:
|
||||
cpu_threshold = float(sys.argv[2])
|
||||
except ValueError:
|
||||
print("Please provide a valid number for the CPU usage threshold.")
|
||||
sys.exit(1)
|
||||
|
||||
try:
|
||||
target_process = psutil.Process(pid)
|
||||
except psutil.NoSuchProcess:
|
||||
print(f"No process with the PID {pid} was found.")
|
||||
sys.exit(1)
|
||||
|
||||
# Prepare to calculate the average CPU usage over 3 intervals of 1 second each
|
||||
cpu_usages = []
|
||||
for _ in range(3):
|
||||
cpu_usages.append(target_process.cpu_percent(interval=1))
|
||||
|
||||
# Calculate the average CPU usage
|
||||
average_cpu_usage = sum(cpu_usages) / len(cpu_usages)
|
||||
print(f"Average CPU Usage of PID {pid} over 3 seconds: {average_cpu_usage}%")
|
||||
|
||||
# Check average CPU usage against the threshold
|
||||
if average_cpu_usage >= cpu_threshold:
|
||||
print(f"Average CPU usage of PID {pid} is above or equal to the threshold of {cpu_threshold}%.")
|
||||
sys.exit(1)
|
||||
else:
|
||||
print(f"Average CPU usage of PID {pid} is below the threshold of {cpu_threshold}%. Exiting with code 0.")
|
||||
sys.exit(0)
|
||||
|
|
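Note: cpu_check.py is the same watchdog the conpot image further down wires into its HEALTHCHECK. The published adbhoney image above ships only the PyInstaller bundle (no python3, no pgrep), so the following Compose-level healthcheck is a sketch of the pattern under the assumption that the script and a Python interpreter are available inside the container:

```yaml
services:
  adbhoney:
    healthcheck:
      # assumption: /cpu_check.py and python3 exist in the image; the 99% threshold mirrors the conpot usage
      test: ["CMD-SHELL", "python3 /cpu_check.py $(pgrep -of adbhoney) 99"]
      interval: 5m
      timeout: 30s
      retries: 3
```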
@ -1,21 +0,0 @@
networks:
  adbhoney_local:

services:

  # Adbhoney service
  adbhoney:
    build: .
    container_name: adbhoney
    restart: always
    # cpu_count: 1
    # cpus: 0.25
    networks:
      - adbhoney_local
    ports:
      - "5555:5555"
    image: "dtagdevsec/adbhoney:24.04"
    read_only: true
    volumes:
      - $HOME/tpotce/data/adbhoney/log:/opt/adbhoney/log
      - $HOME/tpotce/data/adbhoney/downloads:/opt/adbhoney/dl
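Note: every service file in this tree follows the same shape: a dedicated bridge network, restart: always, a read-only root filesystem and bind mounts for logs and artifacts under $HOME/tpotce/data. Local adjustments are easier to keep out of the vendored file; a hedged sketch of a docker-compose.override.yml (host path and extra port are examples only; Compose appends to ports and replaces volume entries that share a container target):

```yaml
# docker-compose.override.yml -- picked up automatically by `docker compose up`
services:
  adbhoney:
    ports:
      - "5554:5555"                              # example: publish ADB on an additional host port
    volumes:
      - /srv/adbhoney/log:/opt/adbhoney/log      # example: override the log bind mount source
```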
@ -1,31 +0,0 @@
FROM golang:1.23-alpine AS builder
#
ENV GO111MODULE=on \
    CGO_ENABLED=0 \
    GOOS=linux
#
# Install packages
RUN apk -U add git
#
WORKDIR /root
#
# Build beelzebub
RUN git clone https://github.com/t3chn0m4g3/beelzebub && \
    cd beelzebub && \
    git checkout 0b9aba53ec1671f669d22782758142a1d411b858
WORKDIR /root/beelzebub
RUN go mod download
RUN go build -o main .
RUN sed -i "s#logsPath: ./log#logsPath: ./configurations/log/beelzebub.json#g" /root/beelzebub/configurations/beelzebub.yaml
RUN sed -i 's/passwordRegex: "^(root|qwerty|Smoker666|123456|jenkins|minecraft|sinus|alex|postgres|Ly123456)$"/passwordRegex: ".*"/g' /root/beelzebub/configurations/services/ssh-22.yaml
#
FROM scratch
#
COPY --from=builder /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/
COPY --from=builder /root/beelzebub/main /opt/beelzebub/
COPY --from=builder /root/beelzebub/configurations /opt/beelzebub/configurations
#
# Start beelzebub
WORKDIR /opt/beelzebub
USER 2000:2000
ENTRYPOINT ["./main"]
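Note: the two sed calls above redirect beelzebub's JSON log into the bind-mounted configurations/log/ directory and relax the SSH password check so that any password is accepted. For orientation, a sketch of the touched line in configurations/services/ssh-22.yaml after the rewrite; only passwordRegex is taken from the sed expression, the surrounding keys are assumed from upstream beelzebub defaults:

```yaml
protocol: "ssh"        # assumed context
address: ":2222"       # assumed context
passwordRegex: ".*"    # rewritten by the Dockerfile: every password is accepted
```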
@ -1,29 +0,0 @@
networks:
  beelzebub_local:

services:

  # Beelzebub service
  beelzebub:
    build: .
    container_name: beelzebub
    restart: always
    # cpu_count: 1
    # cpus: 0.25
    networks:
      - beelzebub_local
    ports:
      - "22:22"
      - "80:80"
      - "2222:2222"
      - "3306:3306"
      - "8080:8080"
    environment:
      LLM_MODEL: "ollama"
      LLM_HOST: "http://ollama.local:11434/api/chat"
      OLLAMA_MODEL: "openchat"
    image: "ghcr.io/telekom-security/beelzebub:24.04.1"
    read_only: true
    volumes:
      - $HOME/tpotce/data/beelzebub/key:/opt/beelzebub/configurations/key
      - $HOME/tpotce/data/beelzebub/log:/opt/beelzebub/configurations/log
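Note: LLM_HOST points at ollama.local, which is not defined anywhere in this file, so the endpoint has to be provided separately. A hedged sketch of a companion service that would satisfy the name via a network alias (image tag, volume path and the idea of co-hosting the model next to the honeypot are assumptions, not part of T-Pot):

```yaml
services:
  ollama:
    image: "ollama/ollama:latest"                 # assumed image tag
    restart: always
    networks:
      beelzebub_local:
        aliases:
          - ollama.local                          # makes LLM_HOST resolvable for beelzebub
    volumes:
      - $HOME/tpotce/data/ollama:/root/.ollama    # example path for model storage
```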
@ -1,36 +0,0 @@
FROM alpine:3.20 AS builder
#
# Install packages
RUN apk --no-cache -U upgrade && \
    apk --no-cache -U add \
    build-base \
    git \
    libffi \
    libffi-dev \
    openssl \
    openssl-dev \
    py3-pip \
    python3 \
    python3-dev && \
#
# Get and install packages
    mkdir -p /opt/ && \
    cd /opt/ && \
    git clone https://github.com/t3chn0m4g3/ciscoasa_honeypot && \
    cd ciscoasa_honeypot && \
    git checkout 4bd2795cfa14320a87c00b7159fa3b7d6a8ba254 && \
    sed -i "s/git+git/git+https/g" requirements.txt && \
    pip3 install --break-system-packages pyinstaller && \
    pip3 install --break-system-packages --no-cache-dir -r requirements.txt
WORKDIR /opt/ciscoasa_honeypot
RUN pyinstaller asa_server.py --add-data "./asa:./asa"
#
FROM alpine:3.20
RUN apk --no-cache -U upgrade
COPY --from=builder /opt/ciscoasa_honeypot/dist/ /opt/
#
# Start ciscoasa
STOPSIGNAL SIGINT
WORKDIR /opt/asa_server/
USER 2000:2000
CMD ./asa_server --ike-port 5000 --enable_ssl --port 8443 --verbose >> /var/log/ciscoasa/ciscoasa.log 2>&1
@ -1,23 +0,0 @@
networks:
  ciscoasa_local:

services:

  # Ciscoasa service
  ciscoasa:
    build: .
    container_name: ciscoasa
    restart: always
    tmpfs:
      - /tmp/ciscoasa:uid=2000,gid=2000
    # cpu_count: 1
    # cpus: 0.25
    networks:
      - ciscoasa_local
    ports:
      - "5000:5000/udp"
      - "8443:8443"
    image: "dtagdevsec/ciscoasa:24.04"
    read_only: true
    volumes:
      - $HOME/tpotce/data/ciscoasa/log:/var/log/ciscoasa
@ -1,43 +0,0 @@
FROM alpine:3.20 AS builder
#
# Install packages
RUN apk --no-cache -U upgrade && \
    apk --no-cache -U add \
    build-base \
    git \
    openssl \
    py3-pip \
    python3 && \
    pip3 install --break-system-packages --no-cache-dir \
    pyinstaller \
    python-json-logger
#
# Install CitrixHoneypot from GitHub
RUN git clone https://github.com/t3chn0m4g3/CitrixHoneypot /opt/citrixhoneypot && \
    cd /opt/citrixhoneypot && \
    git checkout dee32447033a0296d053e8f881bf190f9dd7ad44 && \
    mkdir -p /opt/citrixhoneypot/logs /opt/citrixhoneypot/ssl && \
    openssl req \
    -nodes \
    -x509 \
    -newkey rsa:2048 \
    -keyout "/opt/citrixhoneypot/ssl/key.pem" \
    -out "/opt/citrixhoneypot/ssl/cert.pem" \
    -days 365 \
    -subj '/C=AU/ST=Some-State/O=Internet Widgits Pty Ltd' && \
    chown 2000:2000 -R ssl/
#
WORKDIR /opt/citrixhoneypot
RUN pyinstaller CitrixHoneypot.py
#
FROM alpine:3.20
RUN apk --no-cache -U upgrade
COPY --from=builder /opt/citrixhoneypot/dist/CitrixHoneypot/ /opt/citrixhoneypot
COPY --from=builder /opt/citrixhoneypot/ssl /opt/citrixhoneypot/ssl
COPY --from=builder /opt/citrixhoneypot/responses/ /opt/citrixhoneypot/responses
#
# Set workdir and start citrixhoneypot
STOPSIGNAL SIGINT
USER 2000:2000
WORKDIR /opt/citrixhoneypot/
CMD nohup ./CitrixHoneypot
@ -1,20 +0,0 @@
networks:
  citrixhoneypot_local:

services:

  # CitrixHoneypot service
  citrixhoneypot:
    build: .
    container_name: citrixhoneypot
    restart: always
    # cpu_count: 1
    # cpus: 0.25
    networks:
      - citrixhoneypot_local
    ports:
      - "443:443"
    image: "dtagdevsec/citrixhoneypot:24.04"
    read_only: true
    volumes:
      - $HOME/tpotce/data/citrixhoneypot/log:/opt/citrixhoneypot/logs
@ -1,104 +0,0 @@
FROM alpine:3.19
#
# Include dist
COPY dist/ /root/dist/
#
# Install packages
RUN apk --no-cache -U upgrade && \
    apk --no-cache -U add \
    build-base \
    cython \
    file \
    git \
    libev \
    libtool \
    libcap \
    libffi-dev \
    libxslt \
    libxslt-dev \
    mariadb-dev \
    pkgconfig \
    procps \
    python3 \
    python3-dev \
    py3-cffi \
    py3-cryptography \
    py3-freezegun \
    py3-gevent \
    py3-lxml \
    py3-natsort \
    py3-pip \
    py3-ply \
    py3-psutil \
    py3-pycryptodomex \
    py3-pytest \
    py3-requests \
    py3-pyserial \
    py3-setuptools \
    py3-slugify \
    py3-snmp \
    py3-sphinx \
    py3-wheel \
    py3-zope-event \
    py3-zope-interface \
    wget && \
#
# Setup ConPot
    git clone https://github.com/t3chn0m4g3/cpppo /opt/cpppo && \
    cd /opt/cpppo && \
    git checkout 350d5187a941e7359c53087dcb1f0e41ece5682c && \
    pip3 install --break-system-packages --no-cache-dir --upgrade pip && \
    pip3 install --break-system-packages --no-cache-dir . && \
    git clone https://github.com/mushorg/conpot /opt/conpot && \
    cd /opt/conpot/ && \
    git checkout 26c67d11b08a855a28e87abd186d959741f46c7f && \
# Change template default ports if <1024
    sed -i 's/port="2121"/port="21"/' /opt/conpot/conpot/templates/default/ftp/ftp.xml && \
    sed -i 's/port="8800"/port="80"/' /opt/conpot/conpot/templates/default/http/http.xml && \
    sed -i 's/port="6230"/port="623"/' /opt/conpot/conpot/templates/default/ipmi/ipmi.xml && \
    sed -i 's/port="5020"/port="502"/' /opt/conpot/conpot/templates/default/modbus/modbus.xml && \
    sed -i 's/port="10201"/port="102"/' /opt/conpot/conpot/templates/default/s7comm/s7comm.xml && \
    sed -i 's/port="16100"/port="161"/' /opt/conpot/conpot/templates/default/snmp/snmp.xml && \
    sed -i 's/port="6969"/port="69"/' /opt/conpot/conpot/templates/default/tftp/tftp.xml && \
    sed -i 's/port="16100"/port="161"/' /opt/conpot/conpot/templates/IEC104/snmp/snmp.xml && \
    sed -i 's/port="6230"/port="623"/' /opt/conpot/conpot/templates/ipmi/ipmi/ipmi.xml && \
    cp /root/dist/requirements.txt . && \
    pip3 install --break-system-packages --no-cache-dir . && \
    cd / && \
    rm -rf /opt/conpot /tmp/* /var/tmp/* && \
    setcap cap_net_bind_service=+ep $(readlink -f $(type -P python3)) && \
#
# Get wireshark manuf db for scapy, setup configs, user, groups
    mkdir -p /etc/conpot /var/log/conpot /usr/share/wireshark && \
    wget https://www.wireshark.org/download/automated/data/manuf -o /usr/share/wireshark/manuf && \
    cp /root/dist/conpot.cfg /etc/conpot/conpot.cfg && \
    cp -R /root/dist/templates /usr/lib/$(readlink -f $(type -P python3) | cut -f4 -d"/")/site-packages/conpot/ && \
    cp /root/dist/cpu_check.py / && \
    addgroup -g 2000 conpot && \
    adduser -S -s /bin/ash -u 2000 -D -g 2000 conpot && \
#
# Clean up
    apk del --purge \
    build-base \
    file \
    git \
    libev \
    libtool \
    libxslt-dev \
    mariadb-dev \
    pkgconfig \
    py3-pip \
    python3-dev \
    wget && \
    rm -rf /root/* \
    /tmp/* \
    /var/cache/apk/* \
    /opt/cpppo/.git \
    /opt/conpot/.git
#
# Start conpot
STOPSIGNAL SIGINT
# Conpot sometimes hangs at 100% CPU usage, if detected container will become unhealthy and restarted by tpotinit
HEALTHCHECK --interval=5m --timeout=30s --retries=3 CMD python3 /cpu_check.py $(pgrep -of conpot) 99
USER conpot:conpot
CMD exec /usr/bin/conpot --mibcache $CONPOT_TMP --temp_dir $CONPOT_TMP --template $CONPOT_TEMPLATE --logfile $CONPOT_LOG --config $CONPOT_CONFIG
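Note: the setcap call on the Python binary is what lets the unprivileged conpot user bind the privileged template ports (21, 80, 102, 161, 502, 623) after the port rewrites above. A per-container sysctl is an alternative way to get the same effect at the Compose level; a sketch only, not how T-Pot configures it today:

```yaml
services:
  conpot:
    sysctls:
      - net.ipv4.ip_unprivileged_port_start=0   # allow non-root binds to ports <1024 inside this container
```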
62
docker/conpot/dist/conpot.cfg
vendored
@ -1,62 +0,0 @@
[common]
sensorid = conpot

[virtual_file_system]
data_fs_url = %(CONPOT_TMP)s
fs_url = tar:///usr/lib/python3.11/site-packages/conpot/data.tar

[session]
timeout = 30

[daemon]
user = conpot
group = conpot

[json]
enabled = True
filename = %(CONPOT_JSON_LOG)s

[sqlite]
enabled = False

[mysql]
enabled = False
device = /tmp/mysql.sock
host = localhost
port = 3306
db = conpot
username = conpot
passphrase = conpot
socket = tcp ; tcp (sends to host:port), dev (sends to mysql device/socket file)

[syslog]
enabled = False
device = /dev/log
host = localhost
port = 514
facility = local0
socket = dev ; udp (sends to host:port), dev (sends to device)

[hpfriends]
enabled = False
host = hpfriends.honeycloud.net
port = 20000
ident = 3Ykf9Znv
secret = 4nFRhpm44QkG9cvD
channels = ["conpot.events", ]

[taxii]
enabled = False
host = taxiitest.mitre.org
port = 80
inbox_path = /services/inbox/default/
use_https = False

[fetch_public_ip]
enabled = True
urls = ["http://whatismyip.akamai.com/", "http://wgetip.com/"]

[change_mac_addr]
enabled = False
iface = eth0
addr = 00:de:ad:be:ef:00
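Note: the %(CONPOT_TMP)s and %(CONPOT_JSON_LOG)s placeholders are filled from environment variables, and the Dockerfile's CMD consumes the matching CONPOT_* variables for template, log and config paths. A hedged sketch of the environment block a conpot service would provide; the concrete values are illustrative, the authoritative ones live in the repository's docker-compose.yml:

```yaml
services:
  conpot:
    environment:
      CONPOT_CONFIG: "/etc/conpot/conpot.cfg"
      CONPOT_TEMPLATE: "default"
      CONPOT_LOG: "/var/log/conpot/conpot.log"
      CONPOT_JSON_LOG: "/var/log/conpot/conpot_json.log"
      CONPOT_TMP: "/tmp/conpot"
```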
42
docker/conpot/dist/cpu_check.py
vendored
@ -1,42 +0,0 @@
import psutil
import sys
import time

if len(sys.argv) != 3:
    print("Usage: cpu_check.py <PID> <CPU_USAGE_THRESHOLD>")
    sys.exit(1)

try:
    pid = int(sys.argv[1])
except ValueError:
    print("Please provide a valid integer value for the PID.")
    sys.exit(1)

try:
    cpu_threshold = float(sys.argv[2])
except ValueError:
    print("Please provide a valid number for the CPU usage threshold.")
    sys.exit(1)

try:
    target_process = psutil.Process(pid)
except psutil.NoSuchProcess:
    print(f"No process with the PID {pid} was found.")
    sys.exit(1)

# Prepare to calculate the average CPU usage over 3 intervals of 1 second each
cpu_usages = []
for _ in range(3):
    cpu_usages.append(target_process.cpu_percent(interval=1))

# Calculate the average CPU usage
average_cpu_usage = sum(cpu_usages) / len(cpu_usages)
print(f"Average CPU Usage of PID {pid} over 3 seconds: {average_cpu_usage}%")

# Check average CPU usage against the threshold
if average_cpu_usage >= cpu_threshold:
    print(f"Average CPU usage of PID {pid} is above or equal to the threshold of {cpu_threshold}%.")
    sys.exit(1)
else:
    print(f"Average CPU usage of PID {pid} is below the threshold of {cpu_threshold}%. Exiting with code 0.")
    sys.exit(0)
18
docker/conpot/dist/requirements.txt
vendored
@ -1,18 +0,0 @@
pysnmp-mibs
pysmi==0.3.4
libtaxii>=1.1.0
crc16
scapy==2.4.5
hpfeeds3
modbus-tk
stix-validator
stix
cybox
bacpypes==0.17.0
pyghmi==1.4.1
mixbox
modbus-tk
fs==2.3.0
tftpy
# some freezegun versions broken
sphinx_rtd_theme
679
docker/conpot/dist/templates/IEC104/template.xml
vendored
|
|
@ -1,679 +0,0 @@
|
|||
<!-- Copyright (C) 2017 Patrick Reichenberger (University of Passau) <patrick.reichenberger@t-online.de>
|
||||
|
||||
This program is free software; you can redistribute it and/or
|
||||
modify it under the terms of the GNU General Public License
|
||||
as published by the Free Software Foundation; either version 2
|
||||
of the License, or (at your option) any later version.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with this program; if not, write to the Free Software
|
||||
Foundation, Inc.,
|
||||
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
-->
|
||||
|
||||
<core>
|
||||
<template>
|
||||
<!-- General information about the template -->
|
||||
<entity name="unit">S7-300</entity>
|
||||
<entity name="vendor">Siemens</entity>
|
||||
<entity name="description">Creates a simple device for IEC 60870-5-104</entity>
|
||||
<entity name="protocols">IEC104, SNMP</entity>
|
||||
<entity name="creator">Patrick Reichenberger</entity>
|
||||
</template>
|
||||
<databus>
|
||||
<!-- Core value that can be retrieved from the databus by key -->
|
||||
<key_value_mappings>
|
||||
<!-- SNMPv2-MIB -->
|
||||
<key name="SystemDescription">
|
||||
<value type="value">"Siemens, SIMATIC, S7-300"</value>
|
||||
</key>
|
||||
<key name="sysObjectID">
|
||||
<value type="value">"0.0"</value>
|
||||
</key>
|
||||
<key name="Uptime">
|
||||
<value type="function">conpot.emulators.misc.uptime.Uptime</value>
|
||||
</key>
|
||||
<key name="sysContact">
|
||||
<value type="value">"Corporate IT"</value>
|
||||
</key>
|
||||
<key name="sysName">
|
||||
<value type="value">"DE-BER01"</value>
|
||||
</key>
|
||||
<key name="sysLocation">
|
||||
<value type="value">"BER01, T2E"</value>
|
||||
</key>
|
||||
<key name="sysServices">
|
||||
<value type="value">"72"</value>
|
||||
</key>
|
||||
<!-- IF-MIB -->
|
||||
<key name="ifNumber">
|
||||
<value type="value">1</value>
|
||||
</key>
|
||||
<key name="ifIndex">
|
||||
<value type="value">1</value>
|
||||
</key>
|
||||
<key name="ifDescr">
|
||||
<value type="value">"Siemens, SIMATIC NET, CP 343-1 PN, 6GK7 343-1EX21-0XE0, HW: Version 2, FW: Version V1.2.3, Ethernet Port 1, Rack 0, 100Mbit"</value>
|
||||
</key>
|
||||
<key name="ifType">
|
||||
<value type="value">6</value>
|
||||
</key>
|
||||
<key name="ifMtu">
|
||||
<value type="value">1000</value>
|
||||
</key>
|
||||
<key name="ifSpeed">
|
||||
<value type="value">100000000</value>
|
||||
</key>
|
||||
<key name="ifPhysAddress">
|
||||
<value type="value">"0x000e8c29c51a"</value>
|
||||
</key>
|
||||
<key name="ifAdminStatus">
|
||||
<value type="value">1</value>
|
||||
</key>
|
||||
<key name="ifOperStatus">
|
||||
<value type="value">1</value>
|
||||
</key>
|
||||
<key name="ifLastChange">
|
||||
<value type="function">conpot.emulators.misc.uptime.Uptime</value>
|
||||
</key>
|
||||
<key name="FacilityName">
|
||||
<value type="value">"Compagnie Generale des Eaux"</value>
|
||||
</key>
|
||||
<key name="0">
|
||||
<value type="value">0</value>
|
||||
</key>
|
||||
<key name="1">
|
||||
<value type="value">1</value>
|
||||
</key>
|
||||
<key name="ifInOctets">
|
||||
<value type="function">conpot.emulators.misc.sysinfo.BytesRecv</value>
|
||||
</key>
|
||||
<key name="ifInUcastPkts">
|
||||
<value type="function">conpot.emulators.misc.sysinfo.PacketsRecv</value>
|
||||
</key>
|
||||
<key name="ifInNUcastPkts">
|
||||
<value type="value">291</value>
|
||||
</key>
|
||||
<key name="ifOutOctets">
|
||||
<value type="function">conpot.emulators.misc.sysinfo.BytesSent</value>
|
||||
</key>
|
||||
<key name="ifOutUcastPkts">
|
||||
<value type="function">conpot.emulators.misc.sysinfo.PacketsSent</value>
|
||||
</key>
|
||||
<key name="ifOutUNcastPkts">
|
||||
<value type="value">143</value>
|
||||
</key>
|
||||
|
||||
<!-- IP-MIB -->
|
||||
<key name="ipForwarding">
|
||||
<value type="value">2</value>
|
||||
</key>
|
||||
<key name="ipDefaultTTL">
|
||||
<value type="value">60</value>
|
||||
</key>
|
||||
<key name="ipInReceives">
|
||||
<value type="value">31271</value>
|
||||
</key>
|
||||
<key name="ipInHdrErrors">
|
||||
<value type="value">0</value>
|
||||
</key>
|
||||
<key name="ipInAddrErrors">
|
||||
<value type="value">0</value>
|
||||
</key>
|
||||
<key name="ipForwDatagrams">
|
||||
<value type="value">0</value>
|
||||
</key>
|
||||
<key name="ipInUnknownProtos">
|
||||
<value type="value">0</value>
|
||||
</key>
|
||||
<key name="ipInDiscards">
|
||||
<value type="value">0</value>
|
||||
</key>
|
||||
<key name="ipInDelivers">
|
||||
<value type="value">31282</value>
|
||||
</key>
|
||||
<key name="ipOutRequests">
|
||||
<value type="value">69023</value>
|
||||
</key>
|
||||
<key name="ipOutDiscards">
|
||||
<value type="value">0</value>
|
||||
</key>
|
||||
<key name="ipOutNoRoutes">
|
||||
<value type="value">0</value>
|
||||
</key>
|
||||
<key name="ipReasmTimeout">
|
||||
<value type="value">60</value>
|
||||
</key>
|
||||
<key name="ipReasmReqds">
|
||||
<value type="value">7</value>
|
||||
</key>
|
||||
<key name="ipReasmOKs">
|
||||
<value type="value">3</value>
|
||||
</key>
|
||||
<key name="ipReasmFails">
|
||||
<value type="value">0</value>
|
||||
</key>
|
||||
<key name="ipFragOKs">
|
||||
<value type="value">0</value>
|
||||
</key>
|
||||
<key name="ipFragFails">
|
||||
<value type="value">0</value>
|
||||
</key>
|
||||
<key name="ipFragCreates">
|
||||
<value type="value">0</value>
|
||||
</key>
|
||||
<key name="ipAdEntAddr">
|
||||
<value type="function">conpot.emulators.misc.sysinfo.LocalIP</value>
|
||||
</key>
|
||||
<key name="ipAdEntIfIndex">
|
||||
<value type="value">1</value>
|
||||
</key>
|
||||
<key name="ipAdEntNetMask">
|
||||
<value type="value">"255.255.255.255"</value>
|
||||
</key>
|
||||
<key name="ipAdEntBcastAddr">
|
||||
<value type="value">1</value>
|
||||
</key>
|
||||
<key name="ipAdEntReasmMaxSize">
|
||||
<value type="value">65528</value>
|
||||
</key>
|
||||
<key name="ipRoutingDiscards">
|
||||
<value type="value">0</value>
|
||||
</key>
|
||||
<key name="icmpInMsgs">
|
||||
<value type="value">4</value>
|
||||
</key>
|
||||
<key name="icmpInErrors">
|
||||
<value type="value">0</value>
|
||||
</key>
|
||||
<key name="icmpInDestUnreachs">
|
||||
<value type="value">1</value>
|
||||
</key>
|
||||
<key name="icmpInTimeExcds">
|
||||
<value type="value">0</value>
|
||||
</key>
|
||||
<key name="icmpInParmProbs">
|
||||
<value type="value">0</value>
|
||||
</key>
|
||||
<key name="icmpInSrcQuenchs">
|
||||
<value type="value">0</value>
|
||||
</key>
|
||||
<key name="icmpInRedirects">
|
||||
<value type="value">0</value>
|
||||
</key>
|
||||
<key name="icmpInEchos">
|
||||
<value type="value">0</value>
|
||||
</key>
|
||||
<key name="icmpInEchoReps">
|
||||
<value type="value">0</value>
|
||||
</key>
|
||||
<key name="icmpInTimestamps">
|
||||
<value type="value">0</value>
|
||||
</key>
|
||||
<key name="icmpInTimestampReps">
|
||||
<value type="value">0</value>
|
||||
</key>
|
||||
<key name="icmpInAddrMasks">
|
||||
<value type="value">0</value>
|
||||
</key>
|
||||
<key name="icmpInAddrMaskReps">
|
||||
<value type="value">0</value>
|
||||
</key>
|
||||
<key name="icmpOutMsgs">
|
||||
<value type="value">0</value>
|
||||
</key>
|
||||
<key name="icmpOutErrors">
|
||||
<value type="value">0</value>
|
||||
</key>
|
||||
<key name="icmpOutDestUnreachs">
|
||||
<value type="value">144</value>
|
||||
</key>
|
||||
<key name="icmpOutTimeExcds">
|
||||
<value type="value">0</value>
|
||||
</key>
|
||||
<key name="icmpOutParmProbs">
|
||||
<value type="value">0</value>
|
||||
</key>
|
||||
<key name="icmpOutSrcQuenchs">
|
||||
<value type="value">0</value>
|
||||
</key>
|
||||
<key name="icmpOutRedirects">
|
||||
<value type="value">0</value>
|
||||
</key>
|
||||
<key name="icmpOutEchos">
|
||||
<value type="value">0</value>
|
||||
</key>
|
||||
<key name="icmpOutEchoReps">
|
||||
<value type="value">0</value>
|
||||
</key>
|
||||
<key name="icmpOutTimestamps">
|
||||
<value type="value">0</value>
|
||||
</key>
|
||||
<key name="icmpOutTimestampReps">
|
||||
<value type="value">0</value>
|
||||
</key>
|
||||
<key name="icmpOutAddrMasks">
|
||||
<value type="value">0</value>
|
||||
</key>
|
||||
<key name="icmpOutAddrMaskReps">
|
||||
<value type="value">0</value>
|
||||
</key>
|
||||
|
||||
<!-- TCP-MIB -->
|
||||
<key name="tcpRtoAlgorithm">
|
||||
<value type="value">2</value>
|
||||
</key>
|
||||
<key name="tcpRtoMin">
|
||||
<value type="value">0</value>
|
||||
</key>
|
||||
<key name="tcpRtoMax">
|
||||
<value type="value">100</value>
|
||||
</key>
|
||||
<key name="tcpMaxConn">
|
||||
<value type="value">-1</value>
|
||||
</key>
|
||||
<key name="tcpActiveOpens">
|
||||
<value type="value">0</value>
|
||||
</key>
|
||||
<key name="tcpPassiveOpens">
|
||||
<value type="value">101</value>
|
||||
</key>
|
||||
<key name="tcpAttemptFails">
|
||||
<value type="value">42</value>
|
||||
</key>
|
||||
<key name="tcpEstabResets">
|
||||
<value type="value">45</value>
|
||||
</key>
|
||||
<key name="tcpCurrEstab">
|
||||
<value type="function">conpot.emulators.misc.sysinfo.TcpCurrEstab</value>
|
||||
</key>
|
||||
<key name="tcpInSegs">
|
||||
<value type="value">30321</value>
|
||||
</key>
|
||||
<key name="tcpOutSegs">
|
||||
<value type="value">67821</value>
|
||||
</key>
|
||||
<key name="tcpRetransSegs">
|
||||
<value type="value">2511</value>
|
||||
</key>
|
||||
<key name="tcpConnState">
|
||||
<value type="value">2</value>
|
||||
</key>
|
||||
<key name="tcpConnLocalAddress">
|
||||
<value type="function">conpot.emulators.misc.sysinfo.LocalIP</value>
|
||||
</key>
|
||||
<key name="tcpConnLocalPort">
|
||||
<value type="value">2404</value>
|
||||
</key>
|
||||
<key name="tcpConnRemAddress">
|
||||
<value type="value">"0.0.0.0"</value>
|
||||
</key>
|
||||
<key name="tcpConnRemPort">
|
||||
<value type="value">0</value>
|
||||
</key>
|
||||
<key name="tcpInErrs">
|
||||
<value type="value">1</value>
|
||||
</key>
|
||||
<key name="tcpOutRsts">
|
||||
<value type="value">728</value>
|
||||
</key>
|
||||
<!-- UDP-MIB -->
|
||||
<key name="udpInDatagrams">
|
||||
<value type="value">1441</value>
|
||||
</key>
|
||||
<key name="udpNoPorts">
|
||||
<value type="value">1280</value>
|
||||
</key>
|
||||
<key name="udpInErrors">
|
||||
<value type="value">23</value>
|
||||
</key>
|
||||
<key name="udpOutDatagrams">
|
||||
<value type="value">47</value>
|
||||
</key>
|
||||
<key name="udpLocalAddress">
|
||||
<value type="value">"163.172.189.137"</value>
|
||||
</key>
|
||||
<key name="udpLocalPort">
|
||||
<value type="value">161</value>
|
||||
</key>
|
||||
<key name="SystemName">
|
||||
<value type="value">"CP 343-1 IT"</value>
|
||||
</key>
|
||||
|
||||
|
||||
<!-- IEC104 Protocol parameter -->
|
||||
<!-- Common (Object) Address, aka COA, Station Address -->
|
||||
<key name="CommonAddress">
|
||||
<value type="value">"0x1e28"</value>
|
||||
</key>
|
||||
<!-- Timeout of connection establishment -->
|
||||
<key name="T_0">
|
||||
<value type="value">30</value>
|
||||
</key>
|
||||
<!-- Timeout of send or test APDUs (Wartezeit auf Quittung) -->
|
||||
<key name="T_1">
|
||||
<value type="value">15</value>
|
||||
</key>
|
||||
<!-- Timeout for acknowledges in case of no data messages T_2 < T_1 (Quittieren nach x sek) -->
|
||||
<key name="T_2">
|
||||
<value type="value">10</value>
|
||||
</key>
|
||||
<!-- Timeout for sending test frames in case of a long idle state -->
|
||||
<key name="T_3">
|
||||
<value type="value">20</value>
|
||||
</key>
|
||||
<!-- Maximum difference receive sequence number to send state variable (Max. Anzahl unquittierter Telegramme) -->
|
||||
<!-- not implemented yet -->
|
||||
<key name="k">
|
||||
<value type="value">12</value>
|
||||
</key>
|
||||
<!-- Latest acknowledge after receiving w I-format APDUs (Quittieren nach w Telegrammen) -->
|
||||
<key name="w">
|
||||
<value type="value">8</value>
|
||||
</key>
|
||||
<!-- Maximum frame size (in bytes) -->
|
||||
<key name="MaxFrameSize">
|
||||
<value type="value">254</value>
|
||||
</key>
|
||||
|
||||
<!-- Devices -->
|
||||
<!-- 13- -->
|
||||
<key name="13_20">
|
||||
<value type="value">1</value>
|
||||
</key>
|
||||
<key name="13_21">
|
||||
<value type="value">0</value>
|
||||
</key>
|
||||
<key name="13_22">
|
||||
<value type="value">0</value>
|
||||
</key>
|
||||
<key name="13_24">
|
||||
<value type="value">1</value>
|
||||
</key>
|
||||
<key name="13_25">
|
||||
<value type="value">1</value>
|
||||
</key>
|
||||
<key name="13_32">
|
||||
<value type="value">1</value>
|
||||
</key>
|
||||
<key name="13_33">
|
||||
<value type="value">1</value>
|
||||
</key>
|
||||
<key name="13_34">
|
||||
<value type="value">1</value>
|
||||
</key>
|
||||
<key name="13_35">
|
||||
<value type="value">1</value>
|
||||
</key>
|
||||
<key name="13_36">
|
||||
<value type="value">1</value>
|
||||
</key>
|
||||
<key name="13_37">
|
||||
<value type="value">1</value>
|
||||
</key>
|
||||
<key name="13_38">
|
||||
<value type="value">1</value>
|
||||
</key>
|
||||
<key name="13_39">
|
||||
<value type="value">1</value>
|
||||
</key>
|
||||
<key name="13_40">
|
||||
<value type="value">0</value>
|
||||
</key>
|
||||
<key name="13_41">
|
||||
<value type="value">1</value>
|
||||
</key>
|
||||
<key name="13_42">
|
||||
<value type="value">0</value>
|
||||
</key>
|
||||
|
||||
<!-- 22- -->
|
||||
<key name="22_19">
|
||||
<value type="value">1</value>
|
||||
</key>
|
||||
<key name="22_20">
|
||||
<value type="value">1</value>
|
||||
</key>
|
||||
<key name="22_21">
|
||||
<value type="value">0</value>
|
||||
</key>
|
||||
<key name="22_22">
|
||||
<value type="value">0</value>
|
||||
</key>
|
||||
<key name="22_24">
|
||||
<value type="value">1</value>
|
||||
</key>
|
||||
<key name="22_25">
|
||||
<value type="value">1</value>
|
||||
</key>
|
||||
<key name="22_42">
|
||||
<value type="value">1</value>
|
||||
</key>
|
||||
<key name="22_43">
|
||||
<value type="value">1</value>
|
||||
</key>
|
||||
<key name="22_54">
|
||||
<value type="value">1</value>
|
||||
</key>
|
||||
|
||||
<!-- 33- -->
|
||||
<key name="33_2">
|
||||
<value type="value">1</value>
|
||||
</key>
|
||||
<key name="33_3">
|
||||
<value type="value">2</value>
|
||||
</key>
|
||||
<key name="33_4">
|
||||
<value type="value">1</value>
|
||||
</key>
|
||||
<key name="33_5">
|
||||
<value type="value">2</value>
|
||||
</key>
|
||||
<key name="33_6">
|
||||
<value type="value">2</value>
|
||||
</key>
|
||||
<key name="33_7">
|
||||
<value type="value">1</value>
|
||||
</key>
|
||||
<key name="33_8">
|
||||
<value type="value">1</value>
|
||||
</key>
|
||||
<key name="33_9">
|
||||
<value type="value">1</value>
|
||||
</key>
|
||||
<key name="33_10">
|
||||
<value type="value">1</value>
|
||||
</key>
|
||||
<key name="33_11">
|
||||
<value type="value">1</value>
|
||||
</key>
|
||||
|
||||
<!-- 60- -->
|
||||
<key name="60_6">
|
||||
<value type="value">2</value>
|
||||
</key>
|
||||
<key name="60_7">
|
||||
<value type="value">1</value>
|
||||
</key>
|
||||
<key name="60_8">
|
||||
<value type="value">1</value>
|
||||
</key>
|
||||
<key name="60_9">
|
||||
<value type="value">1</value>
|
||||
</key>
|
||||
<key name="60_20">
|
||||
<value type="value">1</value>
|
||||
</key>
|
||||
<key name="60_21">
|
||||
<value type="value">1</value>
|
||||
</key>
|
||||
<key name="60_32">
|
||||
<value type="value">1</value>
|
||||
</key>
|
||||
<key name="60_34">
|
||||
<value type="value">1</value>
|
||||
</key>
|
||||
<key name="60_35">
|
||||
<value type="value">1</value>
|
||||
</key>
|
||||
<key name="60_36">
|
||||
<value type="value">1</value>
|
||||
</key>
|
||||
|
||||
<!-- 100- -->
|
||||
<key name="100_12">
|
||||
<value type="value">103</value>
|
||||
</key>
|
||||
<key name="100_13">
|
||||
<value type="value">31</value>
|
||||
</key>
|
||||
<key name="100_51">
|
||||
<value type="value">-49</value>
|
||||
</key>
|
||||
<key name="100_108">
|
||||
<value type="value">28871</value>
|
||||
</key>
|
||||
<key name="100_109">
|
||||
<value type="value">13781</value>
|
||||
</key>
|
||||
<key name="100_178">
|
||||
<value type="value">119</value>
|
||||
</key>
|
||||
<key name="100_179">
|
||||
<value type="value">219</value>
|
||||
</key>
|
||||
<key name="100_190">
|
||||
<value type="value">1009</value>
|
||||
</key>
|
||||
<key name="100_191">
|
||||
<value type="value">-2</value>
|
||||
</key>
|
||||
<key name="100_192">
|
||||
<value type="value">701</value>
|
||||
</key>
|
||||
<key name="100_193">
|
||||
<value type="value">441</value>
|
||||
</key>
|
||||
|
||||
<!-- 101- -->
|
||||
<key name="101_63">
|
||||
<value type="value">103</value>
|
||||
</key>
|
||||
<key name="101_205">
|
||||
<value type="value">31</value>
|
||||
</key>
|
||||
<key name="101_100">
|
||||
<value type="value">5</value>
|
||||
</key>
|
||||
<key name="101_101">
|
||||
<value type="value">49</value>
|
||||
</key>
|
||||
<key name="101_102">
|
||||
<value type="value">119</value>
|
||||
</key>
|
||||
<key name="101_105">
|
||||
<value type="value">500</value>
|
||||
</key>
|
||||
<key name="101_106">
|
||||
<value type="value">1</value>
|
||||
</key>
|
||||
|
||||
<!-- 107- -->
|
||||
<key name="107_3">
|
||||
<value type="value">16.2</value>
|
||||
</key>
|
||||
<key name="107_77">
|
||||
<value type="value">15.9</value>
|
||||
</key>
|
||||
<key name="107_78">
|
||||
<value type="value">512.1</value>
|
||||
</key>
|
||||
<key name="107_79">
|
||||
<value type="value">433.4</value>
|
||||
</key>
|
||||
<key name="107_90">
|
||||
<value type="value">344.4</value>
|
||||
</key>
|
||||
<key name="107_130">
|
||||
<value type="value">-0.44013</value>
|
||||
</key>
|
||||
<key name="107_131">
|
||||
<value type="value">43.0</value>
|
||||
</key>
|
||||
<key name="107_132">
|
||||
<value type="value">41.2</value>
|
||||
</key>
|
||||
<key name="107_141">
|
||||
<value type="value">12.1</value>
|
||||
</key>
|
||||
<key name="107_200">
|
||||
<value type="value">91</value>
|
||||
</key>
|
||||
<key name="107_201">
|
||||
<value type="value">98.8</value>
|
||||
</key>
|
||||
<key name="107_202">
|
||||
<value type="value">110</value>
|
||||
</key>
|
||||
<key name="107_203">
|
||||
<value type="value">85.1</value>
|
||||
</key>
|
||||
<key name="107_204">
|
||||
<value type="value">85.2</value>
|
||||
</key>
|
||||
<key name="107_205">
|
||||
<value type="value">410</value>
|
||||
</key>
|
||||
<key name="107_206">
|
||||
<value type="value">592</value>
|
||||
</key>
|
||||
<key name="107_207">
|
||||
<value type="value">1.5</value>
|
||||
</key>
|
||||
<key name="107_208">
|
||||
<value type="value">44.7</value>
|
||||
</key>
|
||||
<key name="107_209">
|
||||
<value type="value">11.9</value>
|
||||
</key>
|
||||
<key name="107_210">
|
||||
<value type="value">221.45</value>
|
||||
</key>
|
||||
<key name="107_211">
|
||||
<value type="value">13.4</value>
|
||||
</key>
|
||||
<key name="107_212">
|
||||
<value type="value">0.000402</value>
|
||||
</key>
|
||||
|
||||
<!-- 109- -->
|
||||
<key name="109_3">
|
||||
<value type="value">16.2</value>
|
||||
</key>
|
||||
<key name="109_7">
|
||||
<value type="value">15.9</value>
|
||||
</key>
|
||||
<key name="109_8">
|
||||
<value type="value">880</value>
|
||||
</key>
|
||||
<key name="109_10">
|
||||
<value type="value">344.4</value>
|
||||
</key>
|
||||
<key name="109_40">
|
||||
<value type="value">41.2</value>
|
||||
</key>
|
||||
<key name="109_41">
|
||||
<value type="value">12.1</value>
|
||||
</key>
|
||||
|
||||
<key name="empty">
|
||||
<value type="value">""</value>
|
||||
</key>
|
||||
</key_value_mappings>
|
||||
</databus>
|
||||
</core>
|
||||
|
|
@ -1,78 +0,0 @@
|
|||
<core>
|
||||
<template>
|
||||
<!-- General information about the template -->
|
||||
<entity name="unit">S7-200</entity>
|
||||
<entity name="vendor">Siemens</entity>
|
||||
<entity name="description">Rough simulation of a basic Siemens S7-200 CPU with 2 slaves</entity>
|
||||
<entity name="protocols">HTTP, MODBUS, s7comm, SNMP</entity>
|
||||
<entity name="creator">the conpot team</entity>
|
||||
</template>
|
||||
<databus>
|
||||
<!-- Core value that can be retrieved from the databus by key -->
|
||||
<key_value_mappings>
|
||||
<key name="FacilityName">
|
||||
<value type="value">"DoE Water Service"</value>
|
||||
</key>
|
||||
<key name="SystemName">
|
||||
<value type="value">"Central Pump"</value>
|
||||
</key>
|
||||
<key name="SystemDescription">
|
||||
<value type="value">"Pump Control Unit"</value>
|
||||
</key>
|
||||
<key name="Uptime">
|
||||
<value type="function">conpot.emulators.misc.uptime.Uptime</value>
|
||||
</key>
|
||||
<key name="sysObjectID">
|
||||
<value type="value">"0.0"</value>
|
||||
</key>
|
||||
<key name="sysContact">
|
||||
<value type="value">"DoE"</value>
|
||||
</key>
|
||||
<key name="sysName">
|
||||
<value type="value">"Pump Control Unit"</value>
|
||||
</key>
|
||||
<key name="sysLocation">
|
||||
<value type="value">"DoE"</value>
|
||||
</key>
|
||||
<key name="sysServices">
|
||||
<value type="value">"72"</value>
|
||||
</key>
|
||||
<key name="memoryModbusSlave0BlockA">
|
||||
<value type="value">[random.randint(0,1) for b in range(0,128)]</value>
|
||||
</key>
|
||||
<key name="memoryModbusSlave0BlockB">
|
||||
<value type="value">[random.randint(0,1) for b in range(0,32)]</value>
|
||||
</key>
|
||||
<key name="memoryModbusSlave255BlockA">
|
||||
<value type="value">[random.randint(0,1) for b in range(0,128)]</value>
|
||||
</key>
|
||||
<key name="memoryModbusSlave255BlockB">
|
||||
<value type="value">[random.randint(0,1) for b in range(0,32)]</value>
|
||||
</key>
|
||||
<key name="memoryModbusSlave1BlockA">
|
||||
<value type="value">[random.randint(0,1) for b in range(0,128)]</value>
|
||||
</key>
|
||||
<key name="memoryModbusSlave1BlockB">
|
||||
<value type="value">[random.randint(0,1) for b in range(0,32)]</value>
|
||||
</key>
|
||||
<key name="memoryModbusSlave2BlockC">
|
||||
<value type="value">[random.randint(0,1) for b in range(0,8)]</value>
|
||||
</key>
|
||||
<key name="memoryModbusSlave2BlockD">
|
||||
<value type="value">[0 for b in range(0,32)]</value>
|
||||
</key>
|
||||
<key name="Copyright">
|
||||
<value type="value">"Original Siemens Equipment"</value>
|
||||
</key>
|
||||
<key name="s7_id">
|
||||
<value type="value">"88111222"</value>
|
||||
</key>
|
||||
<key name="s7_module_type">
|
||||
<value type="value">"IM151-8 PN/DP CPU"</value>
|
||||
</key>
|
||||
<key name="empty">
|
||||
<value type="value">""</value>
|
||||
</key>
|
||||
</key_value_mappings>
|
||||
</databus>
|
||||
</core>
|
||||
|
|
@ -1,93 +0,0 @@
|
|||
<core>
|
||||
<template>
|
||||
<!-- General information about the template -->
|
||||
<entity name="unit">Guardian AST tank-monitoring system</entity>
|
||||
<entity name="vendor">Guardian</entity>
|
||||
<entity name="description">Guardian AST tank-monitoring system</entity>
|
||||
<entity name="protocols">guardian_ast</entity>
|
||||
<entity name="creator">the conpot team</entity>
|
||||
</template>
|
||||
<databus>
|
||||
<!-- Core value that can be retrieved from the databus by key -->
|
||||
<key_value_mappings>
|
||||
<key name="product1">
|
||||
<value type="value">"SUPER"</value>
|
||||
</key>
|
||||
<key name="product2">
|
||||
<value type="value">"UNLEAD"</value>
|
||||
</key>
|
||||
<key name="product3">
|
||||
<value type="value">"DIESEL"</value>
|
||||
</key>
|
||||
<key name="product4">
|
||||
<value type="value">"ADBLUE"</value>
|
||||
</key>
|
||||
<key name="station_name">
|
||||
<value type="value">"AVIA"</value>
|
||||
</key>
|
||||
<key name="vol1">
|
||||
<value type="value">random.randint(1000, 9050)</value>
|
||||
</key>
|
||||
<key name="vol2">
|
||||
<value type="value">random.randint(1000, 9050)</value>
|
||||
</key>
|
||||
<key name="vol3">
|
||||
<value type="value">random.randint(1000, 9050)</value>
|
||||
</key>
|
||||
<key name="vol4">
|
||||
<value type="value">random.randint(1000, 9050)</value>
|
||||
</key>
|
||||
<key name="ullage1">
|
||||
<value type="value">random.randint(3000, 9999)</value>
|
||||
</key>
|
||||
<key name="ullage2">
|
||||
<value type="value">random.randint(3000, 9999)</value>
|
||||
</key>
|
||||
<key name="ullage3">
|
||||
<value type="value">random.randint(3000, 9999)</value>
|
||||
</key>
|
||||
<key name="ullage4">
|
||||
<value type="value">random.randint(3000, 9999)</value>
|
||||
</key>
|
||||
<key name="height1">
|
||||
<value type="value">round(random.uniform(25.00, 75.99), 2)</value>
|
||||
</key>
|
||||
<key name="height2">
|
||||
<value type="value">round(random.uniform(25.00, 75.99), 2)</value>
|
||||
</key>
|
||||
<key name="height3">
|
||||
<value type="value">round(random.uniform(25.00, 75.99), 2)</value>
|
||||
</key>
|
||||
<key name="height4">
|
||||
<value type="value">round(random.uniform(25.00, 75.99), 2)</value>
|
||||
</key>
|
||||
<key name="h2o1">
|
||||
<value type="value">round(random.uniform(0.0, 9.99), 2)</value>
|
||||
</key>
|
||||
<key name="h2o2">
|
||||
<value type="value">round(random.uniform(0.0, 9.99), 2)</value>
|
||||
</key>
|
||||
<key name="h2o3">
|
||||
<value type="value">round(random.uniform(0.0, 9.99), 2)</value>
|
||||
</key>
|
||||
<key name="h2o4">
|
||||
<value type="value">round(random.uniform(0.0, 9.99), 2)</value>
|
||||
</key>
|
||||
<key name="temp1">
|
||||
<value type="value">round(random.uniform(50.0, 59.99), 2)</value>
|
||||
</key>
|
||||
<key name="temp2">
|
||||
<value type="value">round(random.uniform(50.0, 59.99), 2)</value>
|
||||
</key>
|
||||
<key name="temp3">
|
||||
<value type="value">round(random.uniform(50.0, 59.99), 2)</value>
|
||||
</key>
|
||||
<key name="temp4">
|
||||
<value type="value">round(random.uniform(50.0, 59.99), 2)</value>
|
||||
</key>
|
||||
<key name="empty">
|
||||
<value type="value">""</value>
|
||||
</key>
|
||||
</key_value_mappings>
|
||||
</databus>
|
||||
</core>
|
||||
18
docker/conpot/dist/templates/ipmi/template.xml
vendored
@ -1,18 +0,0 @@
<core>
    <template>
        <!-- General information about the template -->
        <entity name="unit">371</entity>
        <entity name="vendor">IPMI</entity>
        <entity name="description">Creates a simple IPMI device</entity>
        <entity name="protocols">IPMI</entity>
        <entity name="creator">Lukas Rist</entity>
    </template>
    <databus>
        <!-- Core value that can be retrieved from the databus by key -->
        <key_value_mappings>
            <key name="SystemName">
                <value type="value">"DoE"</value>
            </key>
        </key_value_mappings>
    </databus>
</core>