mirror of https://github.com/telekom-security/tpotce.git
synced 2025-04-20 06:02:24 +00:00

tweaking

- updating .env, env.example and compose files regarding sentrypeer ENVs
- make glutton image aware of payloads feature
- bump glutton to latest master, alpine 3.19, multi-stage build
- bump ipphoney to alpine 3.19
- bump mailoney to alpine 3.19, adjust for py3
- revert medpot to previous master, use multi-stage build and alpine 3.19
- bump cyberchef to latest master
- bump nginx to alpine 3.19
- bump p0f to alpine 3.19, use multi-stage build
- bump redishoneypot to alpine 3.19, use multi-stage build
- bump sentrypeer to latest master, fix bug for open ports in compose files, now all tcp/5060, udp/5060 traffic will be seen
- bump spiderfoot to latest master
- bump spiderfoot to alpine 3.19
- bump suricata to 7.0.2, fix performance issue with capture-filter-bpf by reducing the rules
- update clean.sh to include glutton payloads folder

This commit is contained in:
parent c45870594b
commit 97adcbeb1b

43 changed files with 5039 additions and 315 deletions
8 .env

@@ -82,14 +82,6 @@ TPOT_ATTACKMAP_TEXT_TIMEZONE=UTC
 # Some services / tools offer adjustments using ENVs which can be adjusted here.
 ###################################################################################
-
-# SentryPeer P2P mode
-# Exchange bad actor data via DHT / P2P mode by setting the ENV to true (1)
-# In some cases (i.e. internally deployed T-Pots) this might be confusing as SentryPeer will show
-# the bad actors in its logs. Therefore this option is opt-in based.
-# 0: This is the default, P2P mode is disabled.
-# 1: Enable P2P mode.
-SENTRYPEER_PEER_TO_PEER=0
 
 # Suricata ET Pro ruleset
 # OPEN: This is the default and will use the ET Open ruleset
 # OINKCODE: Replace OPEN with your Oinkcode to use the ET Pro ruleset
@ -454,12 +454,13 @@ services:
|
|||
depends_on:
|
||||
tpotinit:
|
||||
condition: service_healthy
|
||||
environment:
|
||||
- SENTRYPEER_PEER_TO_PEER=${SENTRYPEER_PEER_TO_PEER:-0} # Default to 0 if unset or NULL (value provided by T-Pot .env)
|
||||
# environment:
|
||||
# - SENTRYPEER_PEER_TO_PEER=1
|
||||
networks:
|
||||
- sentrypeer_local
|
||||
ports:
|
||||
# - "4222:4222/udp"
|
||||
- "5060:5060/tcp"
|
||||
- "5060:5060/udp"
|
||||
# - "127.0.0.1:8082:8082"
|
||||
image: ${TPOT_REPO}/sentrypeer:${TPOT_VERSION}
|
||||
|
|
|
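The new environment line relies on Compose's shell-style parameter expansion: ${SENTRYPEER_PEER_TO_PEER:-0} resolves to the value from the T-Pot .env when set, and to 0 when the variable is unset or empty. A minimal shell sketch of the same operator, using the variable name from the compose files above:

    # ${VAR:-default} substitutes the default when VAR is unset or empty
    unset SENTRYPEER_PEER_TO_PEER
    echo "${SENTRYPEER_PEER_TO_PEER:-0}"    # prints: 0
    export SENTRYPEER_PEER_TO_PEER=1
    echo "${SENTRYPEER_PEER_TO_PEER:-0}"    # prints: 1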
@@ -433,12 +433,13 @@ services:
     depends_on:
       logstash:
         condition: service_healthy
+    environment:
+      - SENTRYPEER_PEER_TO_PEER=${SENTRYPEER_PEER_TO_PEER:-0} # Default to 0 if unset or NULL (value provided by T-Pot .env)
-    # environment:
-    #   - SENTRYPEER_PEER_TO_PEER=1
     networks:
       - sentrypeer_local
     ports:
     # - "4222:4222/udp"
+      - "5060:5060/tcp"
      - "5060:5060/udp"
     # - "127.0.0.1:8082:8082"
     image: ${TPOT_REPO}/sentrypeer:${TPOT_VERSION}
@@ -433,12 +433,13 @@ services:
     depends_on:
       logstash:
         condition: service_healthy
+    environment:
+      - SENTRYPEER_PEER_TO_PEER=${SENTRYPEER_PEER_TO_PEER:-0} # Default to 0 if unset or NULL (value provided by T-Pot .env)
-    # environment:
-    #   - SENTRYPEER_PEER_TO_PEER=1
     networks:
       - sentrypeer_local
     ports:
     # - "4222:4222/udp"
+      - "5060:5060/tcp"
      - "5060:5060/udp"
     # - "127.0.0.1:8082:8082"
     image: ${TPOT_REPO}/sentrypeer:${TPOT_VERSION}
@@ -473,12 +473,13 @@ services:
     depends_on:
       tpotinit:
         condition: service_healthy
+    environment:
+      - SENTRYPEER_PEER_TO_PEER=${SENTRYPEER_PEER_TO_PEER:-0} # Default to 0 if unset or NULL (value provided by T-Pot .env)
-    # environment:
-    #   - SENTRYPEER_PEER_TO_PEER=1
     networks:
       - sentrypeer_local
     ports:
     # - "4222:4222/udp"
+      - "5060:5060/tcp"
      - "5060:5060/udp"
     # - "127.0.0.1:8082:8082"
     image: ${TPOT_REPO}/sentrypeer:${TPOT_VERSION}
@@ -475,12 +475,13 @@ services:
     depends_on:
       tpotinit:
         condition: service_healthy
+    environment:
+      - SENTRYPEER_PEER_TO_PEER=${SENTRYPEER_PEER_TO_PEER:-0} # Default to 0 if unset or NULL (value provided by T-Pot .env)
-    # environment:
-    #   - SENTRYPEER_PEER_TO_PEER=1
     networks:
       - sentrypeer_local
     ports:
     # - "4222:4222/udp"
+      - "5060:5060/tcp"
      - "5060:5060/udp"
     # - "127.0.0.1:8082:8082"
     image: ${TPOT_REPO}/sentrypeer:${TPOT_VERSION}
@@ -16,7 +16,6 @@ networks:
  dionaea_local:
  elasticpot_local:
  endlessh_local:
  glutton_local:
  hellpot_local:
  heralding_local:
  honeypots_local:
@@ -381,6 +380,7 @@ services:
     read_only: true
     volumes:
      - ${TPOT_DATA_PATH}/glutton/log:/var/log/glutton
+     - ${TPOT_DATA_PATH}/glutton/payloads:/opt/glutton/payloads
 
 # Hellpot service
   hellpot:
@@ -606,12 +606,13 @@ services:
     depends_on:
       tpotinit:
         condition: service_healthy
+    environment:
+      - SENTRYPEER_PEER_TO_PEER=${SENTRYPEER_PEER_TO_PEER:-0} # Default to 0 if unset or NULL (value provided by T-Pot .env)
-    # environment:
-    #   - SENTRYPEER_PEER_TO_PEER=1
     networks:
       - sentrypeer_local
     ports:
     # - "4222:4222/udp"
+      - "5060:5060/tcp"
      - "5060:5060/udp"
     # - "127.0.0.1:8082:8082"
     image: ${TPOT_REPO}/sentrypeer:${TPOT_VERSION}
@@ -475,12 +475,13 @@ services:
     depends_on:
       tpotinit:
         condition: service_healthy
+    environment:
+      - SENTRYPEER_PEER_TO_PEER=${SENTRYPEER_PEER_TO_PEER:-0} # Default to 0 if unset or NULL (value provided by T-Pot .env)
-    # environment:
-    #   - SENTRYPEER_PEER_TO_PEER=1
     networks:
       - sentrypeer_local
     ports:
     # - "4222:4222/udp"
+      - "5060:5060/tcp"
      - "5060:5060/udp"
     # - "127.0.0.1:8082:8082"
     image: ${TPOT_REPO}/sentrypeer:${TPOT_VERSION}
@@ -38,11 +38,12 @@ RUN apk -U --no-cache add \
                 libpcap-dev && \
     setcap cap_net_admin,cap_net_raw=+ep /opt/glutton/bin/server && \
     setcap cap_net_admin,cap_net_raw=+ep /sbin/xtables-nft-multi && \
+    mkdir -p /var/log/glutton \
+             /opt/glutton/payloads && \
     #
     # Setup user, groups and configs
     addgroup -g 2000 glutton && \
     adduser -S -s /bin/ash -u 2000 -D -g 2000 glutton && \
-    mkdir -p /var/log/glutton && \
     #
     # Clean up
     rm -rf /var/cache/apk/* \

@@ -50,5 +51,5 @@ RUN apk -U --no-cache add \
 #
 # Start glutton
 WORKDIR /opt/glutton
-#USER glutton:glutton
+USER glutton:glutton
 CMD exec bin/server -d true -i $(/sbin/ip address show | /usr/bin/awk '/inet.*brd/{ print $NF; exit }') -l /var/log/glutton/glutton.log > /dev/null 2>&1
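The CMD picks the capture interface at container start: it scans the output of ip address show for the first inet line that carries a broadcast address and hands that interface name to glutton via -i. The pipeline can be tried stand-alone; a sketch:

    # print the name of the first interface with an IPv4 broadcast ("brd")
    # address; loopback has none, so it is skipped
    /sbin/ip address show | /usr/bin/awk '/inet.*brd/{ print $NF; exit }'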
@@ -16,8 +16,8 @@ services:
     cap_add:
      - NET_ADMIN
     image: "dtagdevsec/glutton:alpha"
-    # read_only: true
+    read_only: true
     volumes:
      - $HOME/tpotce/data/glutton/log:/var/log/glutton
-    # - $HOME/tpotce/data/glutton/payloads:/opt/glutton/payloads
+     - $HOME/tpotce/data/glutton/payloads:/opt/glutton/payloads
     # - $HOME/tpotce/docker/glutton/dist/rules.yaml:/opt/glutton/rules/rules.yaml
@ -1,4 +1,4 @@
|
|||
FROM alpine:3.17
|
||||
FROM alpine:3.19
|
||||
#
|
||||
# Include dist
|
||||
COPY dist/ /root/dist/
|
||||
|
@@ -36,8 +36,8 @@ RUN apk -U --no-cache add \
     cd ipphoney && \
     git checkout 7ab1cac437baba17cb2cd25d5bb1400327e1bb79 && \
     cp /root/dist/requirements.txt . && \
-    pip3 install -r requirements.txt && \
-    setcap cap_net_bind_service=+ep /usr/bin/python3.10 && \
+    pip3 install --break-system-packages -r requirements.txt && \
+    setcap cap_net_bind_service=+ep $(readlink -f $(type -P python3)) && \
     #
     # Setup user, groups and configs
     addgroup -g 2000 ipphoney && \
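Replacing the hardcoded /usr/bin/python3.10 with $(readlink -f $(type -P python3)) makes the setcap call version-independent: file capabilities attach to the real ELF binary, not to the python3 symlink, so resolving the link chain at build time survives minor-version bumps. A sketch of the resolution (paths are illustrative):

    type -P python3                                  # e.g. /usr/bin/python3
    readlink -f "$(type -P python3)"                 # e.g. /usr/bin/python3.11
    setcap cap_net_bind_service=+ep "$(readlink -f "$(type -P python3)")"
    getcap "$(readlink -f "$(type -P python3)")"     # verify the capability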
@@ -19,4 +19,4 @@ services:
     image: "dtagdevsec/ipphoney:alpha"
     read_only: true
     volumes:
-     - /data/ipphoney/log:/opt/ipphoney/log
+     - $HOME/tpotce/data/ipphoney/log:/opt/ipphoney/log
@@ -31,7 +31,7 @@ RUN apt-get update -y && \
     git checkout e224c0f786efb68b4aab892e69857e379b75b0c6 && \
     sed -i 's#"type": logtype,#"reason": logtype,#g' log4pot-server.py && \
     poetry install && \
-    setcap cap_net_bind_service=+ep /usr/bin/python3.10 && \
+    setcap cap_net_bind_service=+ep $(readlink -f $(which python3)) && \
     #
     # Setup user, groups and configs
     addgroup --gid 2000 log4pot && \
@@ -25,5 +25,5 @@ services:
     image: "dtagdevsec/log4pot:alpha"
     read_only: true
     volumes:
-     - /data/log4pot/log:/var/log/log4pot/log
-     - /data/log4pot/payloads:/var/log/log4pot/payloads
+     - $HOME/tpotce/data/log4pot/log:/var/log/log4pot/log
+     - $HOME/tpotce/data/log4pot/payloads:/var/log/log4pot/payloads
@@ -1,20 +1,22 @@
-FROM alpine:3.15
+FROM alpine:3.19
 #
 # Install packages
 RUN apk -U --no-cache add \
             git \
             libcap \
-            python2 && \
+            py3-pip \
+            python3 && \
     #
     # Install mailoney from git
     git clone https://github.com/t3chn0m4g3/mailoney /opt/mailoney && \
     cd /opt/mailoney && \
+    pip3 install --break-system-packages -r requirements.txt && \
     #
     # Setup user, groups and configs
     addgroup -g 2000 mailoney && \
     adduser -S -H -s /bin/ash -u 2000 -D -g 2000 mailoney && \
     chown -R mailoney:mailoney /opt/mailoney && \
-    setcap cap_net_bind_service=+ep /usr/bin/python2.7 && \
+    setcap cap_net_bind_service=+ep $(readlink -f $(type -P python3)) && \
     #
     # Clean up
     apk del --purge git && \
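The --break-system-packages flag appearing throughout this commit is a PEP 668 consequence: Alpine 3.19 marks its system Python as externally managed, so bare pip installs into it are refused. Inside a single-purpose container image, overriding the guard is the usual approach; a sketch:

    # on an externally managed distro Python (PEP 668), this fails:
    pip3 install -r requirements.txt
    #   error: externally-managed-environment
    # inside a throwaway container image the guard can be bypassed:
    pip3 install --break-system-packages -r requirements.txt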
382 docker/mailoney/dist/schizo_open_relay.py (vendored, new file)

@@ -0,0 +1,382 @@
__author__ = '@botnet_hunter'

from datetime import datetime
import socket
try:
    import libemu
except ImportError:
    libemu = None
import sys
import errno
import time
import threading
from time import gmtime, strftime
import asyncore
import asynchat
import re
import json

sys.path.append("../")
import mailoney

output_lock = threading.RLock()
hpc, hpfeeds_prefix = mailoney.connect_hpfeeds()


def string_escape(s, encoding='utf-8'):
    return (s.encode('latin1')          # To bytes, required by 'unicode-escape'
             .decode('unicode-escape')  # Perform the actual octal-escaping decode
             .encode('latin1')          # 1:1 mapping back to bytes
             .decode(encoding))         # Decode original encoding


# def log_to_file(file_path, ip, port, data):
#     with output_lock:
#         with open(file_path, "a") as f:
#             message = "[{0}][{1}:{2}] {3}".format(time.time(), ip, port, string_escape(data))
#             print(file_path + " " + message)
#             f.write(message + "\n")


def log_to_file(file_path, ip, port, data):
    with output_lock:
        try:
            with open(file_path, "a") as f:
                # Find all email addresses in the data
                emails = re.findall(r'\b[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\.[A-Za-z]{2,6}\b', data)
                if len(data) > 4096:
                    data = "BIGSIZE"
                dictmap = {
                    'timestamp': strftime("20%y-%m-%dT%H:%M:%S.000000Z", gmtime()),
                    'src_ip': ip,
                    'src_port': port,
                    'data': data,
                    'smtp_input': emails
                }
                # Serialize the dictionary to a JSON-formatted string
                json_data = json.dumps(dictmap)
                f.write(json_data + '\n')
                # Format the message for logging
                message = "[{0}][{1}:{2}] {3}".format(time.time(), ip, port, repr(data))
                # Log the message to console
                print(file_path + " " + message)
        except Exception as e:
            # Log the error (or pass a specific message)
            print("An error occurred while logging to file: ", str(e))


def log_to_hpfeeds(channel, data):
    if hpc:
        message = data
        hpfchannel = hpfeeds_prefix + "." + channel
        hpc.publish(hpfchannel, message)


def process_packet_for_shellcode(packet, ip, port):
    if libemu is None:
        return
    emulator = libemu.Emulator()
    r = emulator.test(packet)
    if r is not None:
        # we have shellcode
        log_to_file(mailoney.logpath + "/shellcode.log", ip, port, "We have some shellcode")
        #log_to_file(mailoney.logpath+"/shellcode.log", ip, port, emulator.emu_profile_output)
        #log_to_hpfeeds("/shellcode", ip, port, emulator.emu_profile_output)
        log_to_file(mailoney.logpath + "/shellcode.log", ip, port, packet)
        log_to_hpfeeds("shellcode", json.dumps({"Timestamp": format(time.time()), "ServerName": mailoney.srvname, "SrcIP": ip, "SrcPort": port, "Shellcode": packet}))

def generate_version_date():
    now = datetime.now()
    week_days = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
    months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
    return "{0}, {1} {2} {3} {4}:{5}:{6}".format(week_days[now.weekday()], now.day, months[now.month - 1], now.year, str(now.hour).zfill(2), str(now.minute).zfill(2), str(now.second).zfill(2))


__version__ = 'ESMTP Exim 4.69 #1 {0} -0700'.format(generate_version_date())
EMPTYSTRING = b''
NEWLINE = '\n'


class SMTPChannel(asynchat.async_chat):
    COMMAND = 0
    DATA = 1

    def __init__(self, server, conn, addr):
        asynchat.async_chat.__init__(self, conn)
        self.__rolling_buffer = b""
        self.__server = server
        self.__conn = conn
        self.__addr = addr
        self.__line = []
        self.__state = self.COMMAND
        self.__greeting = 0
        self.__mailfrom = None
        self.__rcpttos = []
        self.__data = ''
        from mailoney import srvname
        self.__fqdn = srvname
        try:
            self.__peer = conn.getpeername()
        except socket.error as err:
            # a race condition may occur if the other end is closing
            # before we can get the peername
            self.close()
            # Instead of directly subscripting the err, use err.errno to get the error code.
            if err.errno != errno.ENOTCONN:
                raise
            return
        #print(>> DEBUGSTREAM, 'Peer:', repr(self.__peer))
        #self.set_terminator(b'\r\n')
        self.set_terminator(b'\n')
        self.push('220 %s %s' % (self.__fqdn, __version__))
    # Overrides base class for convenience
    def push(self, msg):
        if type(msg) == str:
            encoded_msg = msg.encode()
        elif type(msg) == bytes:
            encoded_msg = msg

        asynchat.async_chat.push(self, encoded_msg + self.terminator)

    # Implementation of base class abstract method
    def collect_incoming_data(self, data):
        self.__line.append(data)
        self.__rolling_buffer += data
        if len(self.__rolling_buffer) > 1024 * 1024:
            self.__rolling_buffer = self.__rolling_buffer[len(self.__rolling_buffer) - 1024 * 1024:]
        process_packet_for_shellcode(self.__rolling_buffer, self.__addr[0], self.__addr[1])
        del data

    # Implementation of base class abstract method
    def found_terminator(self):

        line = EMPTYSTRING.join(self.__line).decode()
        log_to_file(mailoney.logpath + "/commands.log", self.__addr[0], self.__addr[1], string_escape(line))
        log_to_hpfeeds("commands", json.dumps({"Timestamp": format(time.time()), "ServerName": self.__fqdn, "SrcIP": self.__addr[0], "SrcPort": self.__addr[1], "Commmand": string_escape(line)}))

        #print(>> DEBUGSTREAM, 'Data:', repr(line))
        self.__line = []
        if self.__state == self.COMMAND:
            if not line:
                self.push('500 Error: bad syntax')
                return
            method = None
            i = line.find(' ')
            if i < 0:
                command = line.upper()
                arg = None
            else:
                command = line[:i].upper()
                arg = line[i+1:].strip()
            method = getattr(self, 'smtp_' + command, None)
            if not method:
                self.push('502 Error: command "%s" not implemented' % command)
                return
            method(arg)
            return
        else:
            if self.__state != self.DATA:
                self.push('451 Internal confusion')
                return
            # Remove extraneous carriage returns and de-transparency according
            # to RFC 821, Section 4.5.2.
            data = []
            for text in line.split('\r\n'):
                if text and text[0] == '.':
                    data.append(text[1:])
                else:
                    data.append(text)
            self.__data = NEWLINE.join(data)
            status = self.__server.process_message(self.__peer, self.__mailfrom, self.__rcpttos, self.__data)
            self.__rcpttos = []
            self.__mailfrom = None
            self.__state = self.COMMAND
            self.set_terminator(b'\r\n')
            if not status:
                self.push('250 Ok')
            else:
                self.push(status)
    # SMTP and ESMTP commands
    def smtp_HELO(self, arg):
        if not arg:
            self.push('501 Syntax: HELO hostname')
            return
        if self.__greeting:
            self.push('503 Duplicate HELO/EHLO')
        else:
            self.__greeting = arg
            self.push('250 %s' % self.__fqdn)

    def smtp_EHLO(self, arg):
        if not arg:
            self.push('501 Syntax: EHLO hostname')
            return
        if self.__greeting:
            self.push('503 Duplicate HELO/EHLO')
        else:
            self.__greeting = arg
            self.push('250-{0} Hello {1} [{2}]'.format(self.__fqdn, arg, self.__addr[0]))
            self.push('250-SIZE 52428800')
            self.push('250 AUTH LOGIN PLAIN')

    def smtp_NOOP(self, arg):
        if arg:
            self.push('501 Syntax: NOOP')
        else:
            self.push('250 Ok')

    def smtp_QUIT(self, arg):
        # args is ignored
        self.push('221 Bye')
        self.close_when_done()

    def smtp_AUTH(self, arg):
        # Accept any auth attempt
        self.push('235 Authentication succeeded')

    # factored
    def __getaddr(self, keyword, arg):
        address = None
        keylen = len(keyword)
        if arg[:keylen].upper() == keyword:
            address = arg[keylen:].strip()
            if not address:
                pass
            elif address[0] == '<' and address[-1] == '>' and address != '<>':
                # Addresses can be in the form <person@dom.com> but watch out
                # for null address, e.g. <>
                address = address[1:-1]
        return address

    def smtp_MAIL(self, arg):
        #print(>> DEBUGSTREAM, '===> MAIL', arg)
        address = self.__getaddr('FROM:', arg) if arg else None
        if not address:
            self.push('501 Syntax: MAIL FROM:<address>')
            return
        if self.__mailfrom:
            self.push('503 Error: nested MAIL command')
            return
        self.__mailfrom = address
        #print(>> DEBUGSTREAM, 'sender:', self.__mailfrom)
        self.push('250 Ok')

    def smtp_RCPT(self, arg):
        #print(>> DEBUGSTREAM, '===> RCPT', arg)
        if not self.__mailfrom:
            self.push('503 Error: need MAIL command')
            return
        address = self.__getaddr('TO:', arg) if arg else None
        if not address:
            self.push('501 Syntax: RCPT TO: <address>')
            return
        self.__rcpttos.append(address)
        #print(>> DEBUGSTREAM, 'recips:', self.__rcpttos)
        self.push('250 Ok')

    def smtp_RSET(self, arg):
        if arg:
            self.push('501 Syntax: RSET')
            return
        # Resets the sender, recipients, and data, but not the greeting
        self.__mailfrom = None
        self.__rcpttos = []
        self.__data = ''
        self.__state = self.COMMAND
        self.push('250 Ok')

    def smtp_DATA(self, arg):
        if not self.__rcpttos:
            self.push('503 Error: need RCPT command')
            return
        if arg:
            self.push('501 Syntax: DATA')
            return
        self.__state = self.DATA
        self.set_terminator(b'\r\n.\r\n')
        self.push('354 End data with <CR><LF>.<CR><LF>')

class SMTPServer(asyncore.dispatcher):
    def __init__(self, localaddr, remoteaddr):
        self._localaddr = localaddr
        self._remoteaddr = remoteaddr
        asyncore.dispatcher.__init__(self)
        try:
            self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
            # try to re-use a server port if possible
            self.set_reuse_addr()
            self.bind(localaddr)
            self.listen(5)
        except:
            # cleanup asyncore.socket_map before raising
            self.close()
            raise
        else:
            pass
            #print(>> DEBUGSTREAM, '%s started at %s\n\tLocal addr: %s\n\tRemote addr:%s' % (self.__class__.__name__, time.ctime(time.time()), localaddr, remoteaddr))

    def handle_accept(self):
        pair = self.accept()
        if pair is not None:
            conn, addr = pair
            channel = SMTPChannel(self, conn, addr)

    def handle_close(self):
        self.close()

    # API for "doing something useful with the message"
    def process_message(self, peer, mailfrom, rcpttos, data, mail_options=None, rcpt_options=None):
        """Override this abstract method to handle messages from the client.

        peer is a tuple containing (ipaddr, port) of the client that made the
        socket connection to our smtp port.

        mailfrom is the raw address the client claims the message is coming
        from.

        rcpttos is a list of raw addresses the client wishes to deliver the
        message to.

        data is a string containing the entire full text of the message,
        headers (if supplied) and all. It has been `de-transparencied'
        according to RFC 821, Section 4.5.2. In other words, a line
        containing a `.' followed by other text has had the leading dot
        removed.

        This function should return None, for a normal `250 Ok' response;
        otherwise it returns the desired response string in RFC 821 format.

        """
        raise NotImplementedError


def module():

    class SchizoOpenRelay(SMTPServer):

        def process_message(self, peer, mailfrom, rcpttos, data, mail_options=None, rcpt_options=None):
            #setup the Log File
            log_to_file(mailoney.logpath + "/mail.log", peer[0], peer[1], '')
            log_to_file(mailoney.logpath + "/mail.log", peer[0], peer[1], '*' * 50)
            log_to_file(mailoney.logpath + "/mail.log", peer[0], peer[1], 'Mail from: {0}'.format(mailfrom))
            log_to_file(mailoney.logpath + "/mail.log", peer[0], peer[1], 'Mail to: {0}'.format(", ".join(rcpttos)))
            log_to_file(mailoney.logpath + "/mail.log", peer[0], peer[1], 'Data:')
            log_to_file(mailoney.logpath + "/mail.log", peer[0], peer[1], data)

            loghpfeeds = {}
            loghpfeeds['ServerName'] = mailoney.srvname
            loghpfeeds['Timestamp'] = format(time.time())
            loghpfeeds['SrcIP'] = peer[0]
            loghpfeeds['SrcPort'] = peer[1]
            loghpfeeds['MailFrom'] = mailfrom
            loghpfeeds['MailTo'] = format(", ".join(rcpttos))
            loghpfeeds['Data'] = data
            log_to_hpfeeds("mail", json.dumps(loghpfeeds))

    def run():
        honeypot = SchizoOpenRelay((mailoney.bind_ip, mailoney.bind_port), None)
        print('[*] Mail Relay listening on {}:{}'.format(mailoney.bind_ip, mailoney.bind_port))
        try:
            asyncore.loop()
            print("exiting for some unknown reason")
        except KeyboardInterrupt:
            print('Detected interruption, terminating...')

    run()
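A quick way to exercise the relay once the container is up is a scripted SMTP exchange; a sketch assuming the honeypot is published on 127.0.0.1:25 (addresses are illustrative). Every command lands in commands.log and the message body in mail.log, both as JSON lines:

    printf 'HELO test\r\nMAIL FROM:<a@example.com>\r\nRCPT TO:<b@example.com>\r\nDATA\r\nSubject: hi\r\n\r\nhello\r\n.\r\nQUIT\r\n' \
      | nc -w 3 127.0.0.1 25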
@@ -25,4 +25,4 @@ services:
     image: "dtagdevsec/mailoney:alpha"
     read_only: true
     volumes:
-     - /data/mailoney/log:/opt/mailoney/logs
+     - $HOME/tpotce/data/mailoney/log:/opt/mailoney/logs
@@ -1,4 +1,4 @@
-FROM alpine:3.17
+FROM golang:1.21-alpine as builder
 #
 # Setup apk
 RUN apk -U --no-cache add \
@@ -22,27 +22,21 @@ RUN apk -U --no-cache add \
     go get -d -v go.uber.org/zap && \
     cd medpot && \
-    cp dist/etc/ews.cfg /etc/ && \
-    go build medpot && \
+    go build medpot
 #
+FROM alpine:3.19
+#
 # Setup medpot
-    mkdir -p /opt/medpot \
-             /var/log/medpot && \
-    cp medpot /opt/medpot && \
-    cp /opt/go/src/medpot/template/*.xml /opt/medpot/ && \
-    #
+COPY --from=builder /opt/go/src/medpot/medpot /opt/medpot/medpot
+COPY --from=builder /opt/go/src/medpot/template/*.xml /opt/medpot/
+COPY --from=builder /opt/go/src/medpot/dist/etc/ews.cfg /etc/ews.cfg
+RUN mkdir -p /var/log/medpot && \
     #
     # Setup user, groups and configs
     addgroup -g 2000 medpot && \
     adduser -S -s /bin/ash -u 2000 -D -g 2000 medpot && \
-    chown -R medpot:medpot /var/log/medpot && \
     #
     # Clean up
-    apk del --purge build-base \
-                    git \
-                    go \
-                    g++ && \
-    rm -rf /var/cache/apk/* \
-           /opt/go \
-           /root/dist
+    chown -R medpot:medpot /var/log/medpot
 #
 # Start medpot
 WORKDIR /opt/medpot
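The medpot image now follows the same multi-stage pattern as glutton and redishoneypot: the Go toolchain lives only in the golang builder stage, and the final alpine stage receives just the compiled binary, templates and config via COPY --from=builder, which also removes the need for the old apk del clean-up. A rough way to confirm the effect (tag is illustrative):

    docker build -t medpot:test docker/medpot/
    docker history medpot:test   # only the final alpine stage's layers remain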
@@ -19,4 +19,4 @@ services:
     image: "dtagdevsec/medpot:alpha"
     read_only: true
     volumes:
-     - /data/medpot/log/:/var/log/medpot
+     - $HOME/tpotce/data/medpot/log/:/var/log/medpot
@@ -1,4 +1,4 @@
-FROM alpine:3.17
+FROM alpine:3.19
 #
 # Include dist
 COPY dist/ /root/dist/
@@ -1,8 +1,8 @@
 #FROM node:17.9.0-alpine3.15 as builder
-FROM node:18-alpine3.15 as builder
+FROM node:18-alpine as builder
 #
 # Prep and build Cyberchef
-ENV CY_VER=v9.55.0
+ENV CY_VER=v10.8.2
 RUN apk -U --no-cache add build-base git python3 && \
     chown -R node:node /srv && \
     npm install -g grunt-cli
@@ -1,19 +1,21 @@
-FROM node:14.18-alpine AS builder
+### elasticvue build is currently broken, issue has been opened https://github.com/cars10/elasticvue/issues/215
+### in the meantime we are using the older dist, if not resolved we need to find a different solution
+FROM node:20-alpine AS builder
 #
 # Prep and build Elasticvue
 RUN apk -U --no-cache add git && \
-    git clone https://github.com/cars10/elasticvue /opt/src && \
-    # We need to adjust consts.js so the user has connection suggestion for reverse proxied ES
-    sed -i "s#export const DEFAULT_HOST = 'http://localhost:9200'#export const DEFAULT_HOST = window.location.origin + '/es'#g" /opt/src/src/consts.js && \
-    sed -i 's#href="/images/logo/favicon.ico"#href="images/logo/favicon.ico"#g' /opt/src/public/index.html && \
+    git clone https://github.com/cars10/elasticvue -b v1.0.4 /opt/src && \
+    # We need to adjust consts.ts so the user has connection suggestion for reverse proxied ES
+    sed -i "s#export const DEFAULT_CLUSTER_URI = 'http://localhost:9200'#export const DEFAULT_CLUSTER_URI = window.location.origin + '/es'#g" /opt/src/src/consts.ts && \
+    sed -i 's#href="/images/logo/favicon.ico"#href="images/logo/favicon.ico"#g' /opt/src/index.html && \
     mkdir /opt/app && \
     cd /opt/app && \
     cp /opt/src/package.json . && \
     cp /opt/src/yarn.lock . && \
-    yarn install --ignore-optional && \
+    yarn install && \
     cp -R /opt/src/* . && \
     # We need to set this ENV so we can run Elasticvue in its own location rather than /
-    VUE_APP_PUBLIC_PATH=/elasticvue/ yarn build && \
+    VITE_APP_BUILD_MODE=docker VUE_APP_PUBLIC_PATH=/elasticvue/ yarn build && \
+    yarn build && \
     cd dist && \
     tar cvfz esvue.tgz *
 #
21 docker/nginx/builder/esvue/Dockerfile.old (new file)

@@ -0,0 +1,21 @@
FROM node:14.18-alpine AS builder
#
# Prep and build Elasticvue
RUN apk -U --no-cache add git && \
    git clone https://github.com/cars10/elasticvue -b v0.44.0 /opt/src && \
    # We need to adjust consts.js so the user has connection suggestion for reverse proxied ES
    sed -i "s#export const DEFAULT_HOST = 'http://localhost:9200'#export const DEFAULT_HOST = window.location.origin + '/es'#g" /opt/src/src/consts.js && \
    sed -i 's#href="/images/logo/favicon.ico"#href="images/logo/favicon.ico"#g' /opt/src/public/index.html && \
    mkdir /opt/app && \
    cd /opt/app && \
    cp /opt/src/package.json . && \
    cp /opt/src/yarn.lock . && \
    yarn install --ignore-optional && \
    cp -R /opt/src/* . && \
    # We need to set this ENV so we can run Elasticvue in its own location rather than /
    VUE_APP_PUBLIC_PATH=/elasticvue/ yarn build && \
    cd dist && \
    tar cvfz esvue.tgz *
#
FROM scratch AS exporter
COPY --from=builder /opt/app/dist/esvue.tgz /
@@ -1,3 +1,5 @@
 #!/bin/bash
 # Needs buildx to build. Run tpotce/bin/setup-builder.sh first
-docker buildx build --output ../../dist/html/esvue/ .
+echo "do not build!"
+exit 0
+docker buildx build --no-cache --progress plain --output ../../dist/html/esvue/ .
BIN docker/nginx/dist/html/cyberchef/cyberchef.tgz (vendored): binary file not shown.
@@ -25,6 +25,6 @@ services:
     image: "dtagdevsec/nginx:alpha"
     read_only: true
     volumes:
-     - /data/nginx/cert/:/etc/nginx/cert/:ro
-     - /data/nginx/conf/nginxpasswd:/etc/nginx/nginxpasswd:ro
-     - /data/nginx/log/:/var/log/nginx/
+     - $HOME/tpotce/data/nginx/cert/:/etc/nginx/cert/:ro
+     - $HOME/tpotce/data/nginx/conf/nginxpasswd:/etc/nginx/nginxpasswd:ro
+     - $HOME/tpotce/data/nginx/log/:/var/log/nginx/
@@ -1,6 +1,6 @@
 # In case of problems Alpine 3.13 needs to be used:
 # https://wiki.alpinelinux.org/wiki/Release_Notes_for_Alpine_3.14.0#faccessat2
-FROM alpine:3.17
+FROM alpine:3.19
 #
 # Add source
 COPY . /opt/p0f
@@ -13,4 +13,4 @@ services:
     image: "dtagdevsec/p0f:alpha"
     read_only: true
     volumes:
-     - /data/p0f/log:/var/log/p0f
+     - $HOME/tpotce/data/p0f/log:/var/log/p0f
@@ -1,4 +1,4 @@
-FROM alpine:3.17
+FROM golang:1.21-alpine as builder
 #
 # Include dist
 COPY dist/ /root/dist/
@@ -18,25 +18,19 @@ RUN apk -U --no-cache add \
     cd RedisHoneyPot && \
     git checkout 45adc622a423d12d76392c3a54274f6cff111d58 && \
     go mod download && \
-    go install && \
-    mkdir -p /opt/redishoneypot && \
-    mv /opt/go/bin/RedisHoneyPot /opt/redishoneypot/ && \
-    mv /root/dist/redis.conf /opt/redishoneypot && \
+    go install
 #
+FROM alpine:3.19
+#
 # Setup redishoneypot
 #
+COPY --from=builder /opt/go/bin/RedisHoneyPot /opt/redishoneypot/
+COPY --from=builder /root/dist/redis.conf /opt/redishoneypot/
 #
 # Setup user, groups and configs
-    addgroup -g 2000 redishoneypot && \
+RUN addgroup -g 2000 redishoneypot && \
     adduser -S -s /bin/ash -u 2000 -D -g 2000 redishoneypot && \
-    mkdir -p /var/log/redishoneypot && \
-    #
-    # Clean up
-    apk del --purge build-base \
-                    git \
-                    go \
-                    g++ && \
-    rm -rf /var/cache/apk/* \
-           /opt/go \
-           /root/* \
-           /opt/redishoneypot/.git
+    mkdir -p /var/log/redishoneypot
 #
 # Start redishoneypot
 WORKDIR /opt/redishoneypot
@@ -19,4 +19,4 @@ services:
     image: "dtagdevsec/redishoneypot:alpha"
     read_only: true
     volumes:
-     - /data/redishoneypot/log:/var/log/redishoneypot
+     - $HOME/tpotce/data/redishoneypot/log:/var/log/redishoneypot
@@ -13,18 +13,18 @@ services:
 #   cpu_count: 1
 #   cpus: 0.25
     environment:
 #    - SENTRYPEER_WEB_GUI=0
-#    - SENTRYPEER_PEER_TO_PEER=0
+#    - SENTRYPEER_PEER_TO_PEER=1
 #    - SENTRYPEER_BOOTSTRAP_NODE=bootstrap.sentrypeer.org
      - SENTRYPEER_VERBOSE=1
      - SENTRYPEER_DEBUG=1
     networks:
      - sentrypeer_local
     ports:
-     - "4222:4222/udp"
+    # - "4222:4222/udp"
      - "5060:5060/udp"
-     - "127.0.0.1:8082:8082"
+     - "5060:5060/tcp"
+    # - "127.0.0.1:8082:8082"
     image: "dtagdevsec/sentrypeer:alpha"
     read_only: true
     volumes:
-     - /data/sentrypeer/log:/var/log/sentrypeer
+     - $HOME/tpotce/data/sentrypeer/log:/var/log/sentrypeer
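With the SentryPeer port fix, SIP traffic is now accepted on both 5060/tcp and 5060/udp, while the DHT port 4222/udp stays unpublished unless P2P mode is enabled. A rough reachability check from the docker host (UDP probes are only indicative):

    nc -z -v -w 2 127.0.0.1 5060       # tcp, should connect
    nc -z -u -v -w 2 127.0.0.1 5060    # udp, nc cannot truly confirm open udp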
@@ -1,4 +1,4 @@
-FROM alpine:3.17
+FROM alpine:3.19
 #
 # Include dist
 COPY dist/ /root/dist/
@@ -38,7 +38,7 @@ RUN apk -U --no-cache add \
             py3-openssl \
             py3-pillow \
             py3-portend \
-            py3-pypdf2 \
+#            py3-pypdf2 \
             py3-phonenumbers \
             py3-pip \
             py3-pysocks \
@@ -57,11 +57,14 @@ RUN apk -U --no-cache add \
     adduser -S -s /bin/ash -u 2000 -D -g 2000 spiderfoot && \
     #
     # Install spiderfoot
-    git clone --depth=1 -b v4.0 https://github.com/smicallef/spiderfoot /home/spiderfoot && \
+    # git clone --depth=1 -b v4.0 https://github.com/smicallef/spiderfoot /home/spiderfoot && \
+    git config --global --add safe.directory /home/spiderfoot && \
+    git clone https://github.com/smicallef/spiderfoot /home/spiderfoot && \
     cd /home/spiderfoot && \
-    pip3 install --upgrade pip && \
+    git checkout 0f815a203afebf05c98b605dba5cf0475a0ee5fd && \
+    pip3 install --break-system-packages --upgrade pip && \
     cp /root/dist/requirements.txt . && \
-    pip3 install --no-cache-dir -r requirements.txt && \
+    pip3 install --break-system-packages --no-cache-dir -r requirements.txt && \
     mkdir -p /home/spiderfoot/.spiderfoot/logs && \
     chown -R spiderfoot:spiderfoot /home/spiderfoot && \
     sed -i "s#'root': '\/'#'root': '\/spiderfoot'#" /home/spiderfoot/sf.py && \
1 docker/spiderfoot/dist/requirements.txt (vendored)

@@ -9,3 +9,4 @@ python-docx>=0.8.11,<0.9
 python-pptx>=0.6.21,<0.7
 publicsuffixlist>=0.7.9,<0.8
 openpyxl>=3.0.9,<4
+PyPDF2>=1.28.6,<2
@@ -18,4 +18,4 @@ services:
      - "127.0.0.1:64303:8080"
     image: "dtagdevsec/spiderfoot:alpha"
     volumes:
-     - /data/spiderfoot:/home/spiderfoot/.spiderfoot
+     - $HOME/tpotce/data/spiderfoot:/home/spiderfoot/.spiderfoot
5 docker/suricata/dist/capture-filter.bpf (vendored)

@@ -1,6 +1,3 @@
+not (host sicherheitstacho.eu or community.sicherheitstacho.eu or listbot.sicherheitstacho.eu) and
-not (host rules.emergingthreats.net or rules.emergingthreatspro.com) and
-not (host deb.debian.org) and
-not (host ghcr.io) and
-not (host index.docker.io or docker.io) and
 not (tcp port 64294) and
 not (tcp port 64305)
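Each host primitive in a BPF capture filter compiles into per-packet address comparisons, one per resolved address, so dropping host rules shrinks the per-packet work, which appears to be the performance fix referenced in the commit message. A filter expression can be inspected before deployment; a sketch (interface name is illustrative):

    # -d compiles the expression and dumps the BPF program without capturing
    tcpdump -i eth0 -d 'not (tcp port 64294) and not (tcp port 64305)'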
412 docker/suricata/dist/suricata.yaml (vendored)

@@ -3,7 +3,10 @@
 
 # Suricata configuration file. In addition to the comments describing all
 # options in this file, full documentation can be found at:
-# https://suricata.readthedocs.io/en/latest/configuration/suricata-yaml.html
+# https://docs.suricata.io/en/latest/configuration/suricata-yaml.html
+
+# This configuration file generated by Suricata 7.0.2.
+suricata-version: "7.0"
 
 ##
 ## Step 1: Inform Suricata about your network
@@ -18,8 +21,8 @@ vars:
     #HOME_NET: "[172.16.0.0/12]"
     #HOME_NET: "any"
 
-    #EXTERNAL_NET: "!$HOME_NET"
-    EXTERNAL_NET: "any"
+    EXTERNAL_NET: "!$HOME_NET"
+    #EXTERNAL_NET: "any"
 
     HTTP_SERVERS: "$HOME_NET"
     SMTP_SERVERS: "$HOME_NET"
@@ -67,10 +70,14 @@ stats:
   #decoder-events: true
   # Decoder event prefix in stats. Has been 'decoder' before, but that leads
   # to missing events in the eve.stats records. See issue #2225.
-  decoder-events-prefix: "decoder.event"
+  #decoder-events-prefix: "decoder.event"
   # Add stream events as stats.
   #stream-events: false
 
+# Plugins -- Experimental -- specify the filename for each plugin shared object
+plugins:
+#   - /path/to/plugin.so
+
 # Configure the type of alert (and other) logging you would like.
 outputs:
   # a line based alerts log similar to Snort's fast.log
@@ -86,7 +93,7 @@ outputs:
       filetype: regular #regular|syslog|unix_dgram|unix_stream|redis
       filename: eve.json
       # Enable for multi-threaded eve.json output; output files are amended with
-      # with an identifier, e.g., eve.9.json
+      # an identifier, e.g., eve.9.json
       #threaded: false
       #prefix: "@cee: " # prefix to prepend to each log entry
       # the following are valid when type: syslog above
@@ -161,6 +168,14 @@ outputs:
             # Enable the logging of tagged packets for rules using the
             # "tag" keyword.
             tagged-packets: yes
+            # Enable logging the final action taken on a packet by the engine
+            # (e.g: the alert may have action 'allowed' but the verdict be
+            # 'drop' due to another alert. That's the engine's verdict)
+            # verdict: yes
+        # app layer frames
+        - frame:
+            # disabled by default as this is very verbose.
+            enabled: no
         - anomaly:
             # Anomaly log records describe unexpected conditions such
             # as truncated packets, packets with invalid IP/UDP/TCP
@@ -206,7 +221,7 @@ outputs:
         - dns:
             # This configuration uses the new DNS logging format,
             # the old configuration is still available:
-            # https://suricata.readthedocs.io/en/latest/output/eve/eve-json-output.html#dns-v1-format
+            # https://docs.suricata.io/en/latest/output/eve/eve-json-output.html#dns-v1-format
 
             # As of Suricata 5.0, version 2 of the eve dns output
             # format is the default.
@@ -247,6 +262,9 @@ outputs:
         #  alerts: yes      # log alerts that caused drops
         #  flows: all       # start or all: 'start' logs only a single drop
         #                   # per flow direction. All logs each dropped pkt.
+        # Enable logging the final action taken on a packet by the engine
+        # (will show more information in case of a drop caused by 'reject')
+        # verdict: yes
         - smtp:
             extended: yes # enable this for extended logging information
             # this includes: bcc, message-id, subject, x_mailer, user-agent
@@ -254,7 +272,7 @@ outputs:
             # reply-to, bcc, message-id, subject, x_mailer, user-agent, received,
             # x-originating-ip, in-reply-to, references, importance, priority,
             # sensitivity, organization, content-md5, date
-            custom: [reply-to, bcc, message-id, subject, x-mailer, user-agent, received, x-originating-ip, in-reply-to, references, organization, date]
+            custom: [bcc, message-id, subject, x_mailer, user-agent, reply-to, received, x-originating-ip, in-reply-to, references, importance, priority, sensitivity, organization, content-md5, date, relays]
             # output md5 of fields: body, subject
             # for the body you need to set app-layer.protocols.smtp.mime.body-md5
             # to yes
@@ -266,12 +284,14 @@ outputs:
         - nfs
         - smb
         - tftp
-        - ikev2
+        - ike
         - dcerpc
         - krb5
+        - bittorrent-dht
        - snmp
         - rfb
         - sip
+        - quic
         - dhcp:
             enabled: no
             # When extended mode is on, all DHCP messages are logged
@@ -282,16 +302,16 @@ outputs:
         - ssh
         - mqtt:
            passwords: yes           # enable output of passwords
-        # HTTP2 logging. HTTP2 support is currently experimental and
-        # disabled by default. To enable, uncomment the following line
-        # and be sure to enable http2 in the app-layer section.
-        #- http2
+        - http2
+        - pgsql:
+            enabled: yes
+            passwords: yes          # enable output of passwords. Disabled by default
         #- stats:
-        #   totals: yes        # stats for all threads merged together
-        #   threads: no        # per thread stats
-        #   deltas: no         # include delta values
+        #    totals: no        # stats for all threads merged together
+        #    threads: no       # per thread stats
+        #    deltas: no        # include delta values
         # bi-directional flows
-        #- flow
+        - flow
         # uni-directional flows
         #- netflow
 
@@ -300,6 +320,16 @@ outputs:
         # flowints.
         #- metadata
 
+        # EXPERIMENTAL per packet output giving TCP state tracking details
+        # including internal state, flags, etc.
+        # This output is experimental, meant for debugging and subject to
+        # change in both config and output without any notice.
+        #- stream:
+        #    all: false                       # log all TCP packets
+        #    event-set: false                 # log packets that have a decoder/stream event
+        #    state-update: false              # log packets triggering a TCP state update
+        #    spurious-retransmission: false   # log spurious retransmission packets
+
   # a line based log of HTTP requests (no alerts)
   - http-log:
       enabled: no
@@ -390,6 +420,9 @@ outputs:
       #ts-format: usec # sec or usec second format (default) is filename.sec usec is filename.sec.usec
       use-stream-depth: no #If set to "yes" packets seen after reaching stream inspection depth are ignored. "no" logs all packets
       honor-pass-rules: no # If set to "yes", flows in which a pass rule matched will stop being logged.
+      # Use "all" to log all packets or use "alerts" to log only alerted packets and flows or "tag"
+      # to log only flow tagged via the "tag" keyword
+      #conditional: all
 
   # a full alert log containing much information for signature writers
   # or for investigating suspected false positives.
@@ -399,14 +432,6 @@ outputs:
       append: yes
       #filetype: regular # 'regular', 'unix_stream' or 'unix_dgram'
 
-  # alert output to prelude (https://www.prelude-siem.org/) only
-  # available if Suricata has been compiled with --enable-prelude
-  - alert-prelude:
-      enabled: no
-      profile: suricata
-      log-packet-content: no
-      log-packet-header: yes
-
   # Stats.log contains data from various counters of the Suricata engine.
   - stats:
       enabled: no
@@ -521,7 +546,7 @@ outputs:
   # Lua Output Support - execute lua script to generate alert and event
   # output.
   # Documented at:
-  # https://suricata.readthedocs.io/en/latest/output/lua-output.html
+  # https://docs.suricata.io/en/latest/output/lua-output.html
   - lua:
       enabled: no
       #scripts-dir: /etc/suricata/lua-output/
@@ -542,8 +567,11 @@ logging:
   # something reasonable if not provided. Can be overridden in an
   # output section. You can leave this out to get the default.
   #
-  # This value is overridden by the SC_LOG_FORMAT env var.
-  #default-log-format: "[%i] %t - (%f:%l) <%d> (%n) -- "
+  # This console log format value can be overridden by the SC_LOG_FORMAT env var.
+  #default-log-format: "%D: %S: %M"
+  #
+  # For the pre-7.0 log format use:
+  #default-log-format: "[%i] %t [%S] - (%f:%l) <%d> (%n) -- "
 
   # A regex to filter output. Can be overridden in an output section.
   # Defaults to empty (no filter).
@@ -551,6 +579,11 @@ logging:
   # This value is overridden by the SC_LOG_OP_FILTER env var.
   default-output-filter:
 
+  # Requires libunwind to be available when Suricata is configured and built.
+  # If a signal unexpectedly terminates Suricata, displays a brief diagnostic
+  # message with the offending stacktrace if enabled.
+  #stacktrace-on-signal: on
+
   # Define your logging outputs. If none are defined, or they are all
   # disabled you will get the default: console output.
   outputs:
@@ -561,6 +594,7 @@ logging:
       enabled: yes
       level: info
       filename: /var/log/suricata/suricata.log
+      # format: "[%i - %m] %z %d: %S: %M"
       # type: json
   - syslog:
       enabled: no
@@ -594,6 +628,7 @@ af-packet:
     # more info.
     # Recommended modes are cluster_flow on most boxes and cluster_cpu or cluster_qm on system
     # with capture card using RSS (requires cpu affinity tuning and system IRQ tuning)
+    # cluster_rollover has been deprecated; if used, it'll be replaced with cluster_flow.
     cluster-type: cluster_flow
     # In some fragmentation cases, the hash can not be computed. If "defrag" is set
     # to yes, the kernel will do the needed defragmentation before sending the packets.
@@ -656,6 +691,117 @@ af-packet:
     #use-mmap: no
     #tpacket-v3: yes
 
+# Linux high speed af-xdp capture support
+af-xdp:
+  - interface: default
+    # Number of receive threads. "auto" uses least between the number
+    # of cores and RX queues
+    #threads: auto
+    #disable-promisc: false
+    # XDP_DRV mode can be chosen when the driver supports XDP
+    # XDP_SKB mode can be chosen when the driver does not support XDP
+    # Possible values are:
+    # - drv: enable XDP_DRV mode
+    # - skb: enable XDP_SKB mode
+    # - none: disable (kernel in charge of applying mode)
+    #force-xdp-mode: none
+    # During socket binding the kernel will attempt zero-copy, if this
+    # fails it will fallback to copy. If this fails, the bind fails.
+    # The bind can be explicitly configured using the option below.
+    # If configured, the bind will fail if not successful (no fallback).
+    # Possible values are:
+    # - zero: enable zero-copy mode
+    # - copy: enable copy mode
+    # - none: disable (kernel in charge of applying mode)
+    #force-bind-mode: none
+    # Memory alignment mode can vary between two modes, aligned and
+    # unaligned chunk modes. By default, aligned chunk mode is selected.
+    # select 'yes' to enable unaligned chunk mode.
+    # Note: unaligned chunk mode uses hugepages, so the required number
+    # of pages must be available.
+    #mem-unaligned: no
+    # The following options configure the prefer-busy-polling socket
+    # options. The polling time and budget can be edited here.
+    # Possible values are:
+    # - yes: enable (default)
+    # - no: disable
+    #enable-busy-poll: yes
+    # busy-poll-time sets the approximate time in microseconds to busy
+    # poll on a blocking receive when there is no data.
+    #busy-poll-time: 20
+    # busy-poll-budget is the budget allowed for packet batches
+    #busy-poll-budget: 64
+    # These two tunables are used to configure the Linux OS's NAPI
+    # context. Their purpose is to defer enabling of interrupts and
+    # instead schedule the NAPI context from a watchdog timer.
+    # The softirq NAPI will exit early, allowing busy polling to be
+    # performed. Successfully setting these tunables alongside busy-polling
+    # should improve performance.
+    # Defaults are:
+    #gro-flush-timeout: 2000000
+    #napi-defer-hard-irq: 2
+
+dpdk:
+  eal-params:
+    proc-type: primary
+
+  # DPDK capture support
+  # RX queues (and TX queues in IPS mode) are assigned to cores in 1:1 ratio
+  interfaces:
+    - interface: 0000:3b:00.0 # PCIe address of the NIC port
+      # Threading: possible values are either "auto" or number of threads
+      # - auto takes all cores
+      # in IPS mode it is required to specify the number of cores and the numbers on both interfaces must match
+      threads: auto
+      promisc: true # promiscuous mode - capture all packets
+      multicast: true # enables also detection on multicast packets
+      checksum-checks: true # if Suricata should validate checksums
+      checksum-checks-offload: true # if possible offload checksum validation to the NIC (saves Suricata resources)
+      mtu: 1500 # Set MTU of the device in bytes
+      # rss-hash-functions: 0x0 # advanced configuration option, use only if you use untested NIC card and experience RSS warnings,
+      # For `rss-hash-functions` use hexadecimal 0x01ab format to specify RSS hash function flags - DumpRssFlags can help (you can see output if you use -vvv option during Suri startup)
+      # setting auto to rss_hf sets the default RSS hash functions (based on IP addresses)
+
+      # To approximately calculate required amount of space (in bytes) for interface's mempool: mempool-size * mtu
+      # Make sure you have enough allocated hugepages.
+      # The optimum size for the packet memory pool (in terms of memory usage) is power of two minus one: n = (2^q - 1)
+      mempool-size: 65535 # The number of elements in the mbuf pool
+
+      # Mempool cache size must be lower or equal to:
+      #   - RTE_MEMPOOL_CACHE_MAX_SIZE (by default 512) and
+      #   - "mempool-size / 1.5"
+      # It is advised to choose cache_size to have "mempool-size modulo cache_size == 0".
+      # If this is not the case, some elements will always stay in the pool and will never be used.
+      # The cache can be disabled if the cache_size argument is set to 0, can be useful to avoid losing objects in cache
+      # If the value is empty or set to "auto", Suricata will attempt to set cache size of the mempool to a value
+      # that matches the previously mentioned recommendations
+      mempool-cache-size: 257
+      rx-descriptors: 1024
+      tx-descriptors: 1024
+      #
+      # IPS mode for Suricata works in 3 modes - none, tap, ips
+      # - none: IDS mode only - disables IPS functionality (does not further forward packets)
+      # - tap: forwards all packets and generates alerts (omits DROP action) This is not DPDK TAP
+      # - ips: the same as tap mode but it also drops packets that are flagged by rules to be dropped
+      copy-mode: none
+      copy-iface: none # or PCIe address of the second interface
+
+    - interface: default
+      threads: auto
+      promisc: true
+      multicast: true
+      checksum-checks: true
+      checksum-checks-offload: true
+      mtu: 1500
+      rss-hash-functions: auto
+      mempool-size: 65535
+      mempool-cache-size: 257
+      rx-descriptors: 1024
+      tx-descriptors: 1024
+      copy-mode: none
+      copy-iface: none
+
 
 # Cross platform libpcap capture support
 pcap:
   - interface: eth0
@@ -706,27 +852,40 @@ pcap-file:
 ## Step 4: App Layer Protocol configuration
 ##
 
-# Configure the app-layer parsers. The protocol's section details each
-# protocol.
+# Configure the app-layer parsers.
+#
+# The error-policy setting applies to all app-layer parsers. Values can be
+# "drop-flow", "pass-flow", "bypass", "drop-packet", "pass-packet", "reject" or
+# "ignore" (the default).
+#
+# The protocol's section details each protocol.
+#
 # The option "enabled" takes 3 values - "yes", "no", "detection-only".
 # "yes" enables both detection and the parser, "no" disables both, and
 # "detection-only" enables protocol detection only (parser disabled).
 app-layer:
+  # error-policy: ignore
   protocols:
+    telnet:
+      enabled: yes
     rfb:
       enabled: yes
       detection-ports:
         dp: 5900, 5901, 5902, 5903, 5904, 5905, 5906, 5907, 5908, 5909
     # MQTT, disabled by default.
     mqtt:
       enabled: yes
-      max-msg-length: 1mb
+      # max-msg-length: 1mb
+      # subscribe-topic-match-limit: 100
+      # unsubscribe-topic-match-limit: 100
+      # Maximum number of live MQTT transactions per flow
+      # max-tx: 4096
     krb5:
       enabled: yes
+    bittorrent-dht:
+      enabled: yes
     snmp:
       enabled: yes
-    ikev2:
+    ike:
       enabled: yes
     tls:
       enabled: yes
@@ -751,8 +910,16 @@ app-layer:
       #
       #encryption-handling: default
 
+    pgsql:
+      enabled: yes
+      # Stream reassembly size for PostgreSQL. By default, track it completely.
+      stream-depth: 0
+      # Maximum number of live PostgreSQL transactions per flow
+      # max-tx: 1024
     dcerpc:
       enabled: yes
+      # Maximum number of live DCERPC transactions per flow
+      # max-tx: 1024
     ftp:
       enabled: yes
       # memcap: 64mb
@@ -761,9 +928,12 @@ app-layer:
     ssh:
       enabled: yes
       hassh: yes
-    # HTTP2: Experimental HTTP 2 support. Disabled by default.
     http2:
-      enabled: no
+      enabled: yes
+      # Maximum number of live HTTP2 streams in a flow
+      #max-streams: 4096
+      # Maximum headers table size
+      #max-table-size: 65536
     smtp:
       enabled: yes
       raw-extraction: no
@@ -785,6 +955,12 @@ app-layer:
 
       # Extract URLs and save in state data structure
       extract-urls: yes
+      # Scheme of URLs to extract
+      # (default is [http])
+      extract-urls-schemes: [http, https, ftp, mailto]
+      # Log the scheme of URLs that are extracted
+      # (default is no)
+      log-url-scheme: yes
       # Set to yes to compute the md5 of the mail body. You will then
       # be able to journalize it.
       body-md5: yes
@@ -799,12 +975,15 @@ app-layer:
       enabled: yes
       detection-ports:
         dp: 139, 445
+      # Maximum number of live SMB transactions per flow
+      # max-tx: 1024
 
       # Stream reassembly size for SMB streams. By default track it completely.
       #stream-depth: 0
 
     nfs:
       enabled: yes
+      # max-tx: 1024
     tftp:
       enabled: yes
     dns:
@@ -818,6 +997,12 @@ app-layer:
         dp: 53
     http:
       enabled: yes
+
+      # Byte Range Containers default settings
+      # byterange:
+      #   memcap: 100mb
+      #   timeout: 60
 
       # memcap: Maximum memory capacity for HTTP
       # Default is unlimited, values can be 64mb, e.g.
@@ -861,7 +1046,7 @@ app-layer:
       # auto will use http-body-inline mode in IPS mode, yes or no set it statically
       http-body-inline: auto
 
-      # Decompress SWF files.
+      # Decompress SWF files. Disabled by default.
       # Two types: 'deflate', 'lzma', 'both' will decompress deflate and lzma
       # compress-depth:
       # Specifies the maximum amount of data to decompress,
|
|||
# Specifies the maximum amount of decompressed data to obtain,
|
||||
# set 0 for unlimited.
|
||||
swf-decompression:
|
||||
enabled: yes
|
||||
enabled: no
|
||||
type: both
|
||||
compress-depth: 0
|
||||
decompress-depth: 0
|
||||
compress-depth: 100kb
|
||||
decompress-depth: 100kb
|
||||
|
||||
# Use a random value for inspection sizes around the specified value.
|
||||
# This lowers the risk of some evasion techniques but could lead
|
||||
|
@@ -897,6 +1082,8 @@ app-layer:
           # Maximum decompressed size with a compression ratio
           # above 2048 (only LZMA can reach this ratio, deflate cannot)
           #compression-bomb-limit: 1mb
+          # Maximum time spent decompressing a single transaction in usec
+          #decompression-time-limit: 100000
 
       server-config:
 
@@ -952,7 +1139,7 @@ app-layer:
 
     # SCADA EtherNet/IP and CIP protocol support
     enip:
-      enabled: no
+      enabled: yes
       detection-ports:
         dp: 44818
         sp: 44818
@@ -960,6 +1147,9 @@ app-layer:
     ntp:
       enabled: yes
 
+    quic:
+      enabled: yes
+
     dhcp:
       enabled: no
 
@ -970,12 +1160,23 @@ app-layer:
|
|||
asn1-max-frames: 256
|
||||
|
||||
# Datasets default settings
|
||||
# datasets:
|
||||
# # Default fallback memcap and hashsize values for datasets in case these
|
||||
# # were not explicitly defined.
|
||||
# defaults:
|
||||
# memcap: 100mb
|
||||
# hashsize: 2048
|
||||
datasets:
|
||||
# Default fallback memcap and hashsize values for datasets in case these
|
||||
# were not explicitly defined.
|
||||
defaults:
|
||||
#memcap: 100mb
|
||||
#hashsize: 2048
|
||||
|
||||
rules:
|
||||
# Set to true to allow absolute filenames and filenames that use
|
||||
# ".." components to reference parent directories in rules that specify
|
||||
# their filenames.
|
||||
#allow-absolute-filenames: false
|
||||
|
||||
# Allow datasets in rules write access for "save" and
|
||||
# "state". This is enabled by default, however write access is
|
||||
# limited to the data directory.
|
||||
#allow-write: true
|
||||
|
||||
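
Note: besides the global defaults, the same datasets: section can declare named sets that rules then reference with the dataset keyword. A hypothetical example (set name and state file are illustrative, not part of this commit):

datasets:
  defaults:
    memcap: 100mb
    hashsize: 2048
  ua-seen:                # hypothetical named dataset
    type: string
    state: ua-seen.lst    # persisted under the data directory, see allow-write above
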
##############################################################################
##

@ -992,6 +1193,27 @@ run-as:

  user: suri
  group: suri

security:
  # if true, prevents process creation from Suricata by calling
  # setrlimit(RLIMIT_NPROC, 0)
  limit-noproc: true
  # Use landlock security module under Linux
  landlock:
    enabled: no
    directories:
      #write:
      #  - /var/run/
      # /usr and /etc folders are added to read list to allow
      # file magic to be used.
      read:
        - /usr/
        - /etc/
        - /etc/suricata/
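
Note: to actually enforce landlock (the hunk ships it disabled), one would flip enabled and grant write access to the paths Suricata must touch; a sketch assuming logs are written to /var/log/suricata:

security:
  limit-noproc: true
  landlock:
    enabled: yes
    directories:
      write:
        - /var/run/
        - /var/log/suricata/   # assumption: default log directory
      read:
        - /usr/
        - /etc/
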
lua:
  # Allow Lua rules. Disabled by default.
  #allow-rules: false

# Some logging modules will use that name in event as identifier. The default
# value is the hostname
#sensor-name: suricata

@ -1046,6 +1268,8 @@ host-mode: auto

#
# hash - Flow assigned to threads using the 5-7 tuple hash.
# ippair - Flow assigned to threads using addresses only.
# ftp-hash - Flow assigned to threads using the hash, except for FTP, so that
#            ftp-data flows will be handled by the same thread
#
#autofp-scheduler: hash

@ -1061,12 +1285,12 @@ host-mode: auto

# activated in live capture mode. You can use the filename variable to set
# the file name of the socket.
unix-command:
  enabled: yes
  enabled: auto
  #filename: custom.socket

# Magic file. The extension .mgc is added to the value here.
#magic-file: /usr/share/file/magic
magic-file: /usr/share/misc/magic.mgc
#magic-file:

# GeoIP2 database file. Specify path and filename of GeoIP2 database
# if using rules with "geoip" rule option.

@ -1087,6 +1311,22 @@ legacy:

# - reject
# - alert

# Define maximum number of possible alerts that can be triggered for the same
# packet. Default is 15
#packet-alert-max: 15

# Exception Policies
#
# Define a common behavior for all exception policies.
# In IPS mode, the default is drop-flow. For cases when that's not possible, the
# engine will fall to drop-packet. To fallback to old behavior (setting each of
# them individually, or ignoring all), set this to ignore.
# All values available for exception policies can be used, and there is one
# extra option: auto - which means drop-flow or drop-packet (as explained above)
# in IPS mode, and ignore in IDS mode. Exception policy values are: drop-packet,
# drop-flow, reject, bypass, pass-packet, pass-flow, ignore (disable).
exception-policy: auto
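
Note: per the comment block, auto resolves to ignore in IDS mode, which is how Suricata typically runs inside T-Pot, so nothing gets dropped here. Forcing one uniform behavior would be a single line (illustrative, not part of the commit):

exception-policy: drop-flow   # IPS-style: discard the whole flow when an exception hits
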
# IP Reputation
#reputation-categories-file: /etc/suricata/iprep/categories.txt
#default-reputation-path: /etc/suricata/iprep

@ -1134,8 +1374,11 @@ host-os-policy:

# Defrag settings:

# The memcap-policy value can be "drop-packet", "pass-packet", "reject" or
# "ignore" (which is the default).
defrag:
  memcap: 32mb
  # memcap-policy: ignore
  hash-size: 65536
  trackers: 65535 # number of defragmented flows to follow
  max-frags: 65535 # number of fragments to keep (higher than trackers)
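
Note: if the defragmenter runs over its memcap, the new memcap-policy knob can surface that instead of silently ignoring it; a sketch reusing the values from this hunk with an explicit policy:

defrag:
  memcap: 32mb
  memcap-policy: drop-packet   # or pass-packet / reject; ignore is the default
  hash-size: 65536
  trackers: 65535
  max-frags: 65535
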
@ -1175,9 +1418,12 @@ defrag:

# last time seen flows.
# The memcap can be specified in kb, mb, gb. Just a number indicates it's
# in bytes.
# The memcap-policy can be "drop-packet", "pass-packet", "reject" or "ignore"
# (which is the default).

flow:
  memcap: 128mb
  #memcap-policy: ignore
  hash-size: 65536
  prealloc: 10000
  emergency-recovery: 30

@ -1191,6 +1437,12 @@ flow:

vlan:
  use-for-tracking: true

# This option controls the use of livedev ids in the flow (and defrag)
# hashing. This is enabled by default and should be disabled if
# multiple live devices are used to capture traffic from the same network
livedev:
  use-for-tracking: true
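
Note: the comment spells out the inverse case as well: when several capture devices see the same network, livedev ids should be kept out of the hash so both directions of a flow end up in the same tracker:

livedev:
  use-for-tracking: false   # disable when multiple NICs capture the same network
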
# Specific timeouts for flows. Here you can specify the timeouts that the
# active flows will wait to transit from the current state to another, on each
# protocol. The value of "new" determines the seconds to wait after a handshake or

@ -1248,8 +1500,11 @@ flow-timeouts:

#   engine is configured.
#
# stream:
#   memcap: 32mb                # Can be specified in kb, mb, gb. Just a
#   memcap: 64mb                # Can be specified in kb, mb, gb. Just a
#                               # number indicates it's in bytes.
#   memcap-policy: ignore       # Can be "drop-flow", "pass-flow", "bypass",
#                               # "drop-packet", "pass-packet", "reject" or
#                               # "ignore" default is "ignore"
#   checksum-validation: yes    # To validate the checksum of received
#                               # packet. If csum validation is specified as
#                               # "yes", then packets with invalid csum values will not

@ -1259,19 +1514,28 @@ flow-timeouts:

#                               # of checksum. You can control the handling of checksum
#                               # on a per-interface basis via the 'checksum-checks'
#                               # option
#   prealloc-sessions: 2k       # 2k sessions prealloc'd per stream thread
#   prealloc-sessions: 2048     # 2k sessions prealloc'd per stream thread
#   midstream: false            # don't allow midstream session pickups
#   midstream-policy: ignore    # Can be "drop-flow", "pass-flow", "bypass",
#                               # "drop-packet", "pass-packet", "reject" or
#                               # "ignore" default is "ignore"
#   async-oneside: false        # don't enable async stream handling
#   inline: no                  # stream inline mode
#   drop-invalid: yes           # in inline mode, drop packets that are invalid with regards to streaming engine
#   max-syn-queued: 10          # Max different SYNs to queue
#   max-synack-queued: 5        # Max different SYN/ACKs to queue
#   bypass: no                  # Bypass packets when stream.reassembly.depth is reached.
#                               # Warning: first side to reach this triggers
#                               # the bypass.
#   liberal-timestamps: false   # Treat all timestamps as if the Linux policy applies. This
#                               # means it's slightly more permissive. Enabled by default.
#
# reassembly:
#   memcap: 64mb                # Can be specified in kb, mb, gb. Just a number
#   memcap: 256mb               # Can be specified in kb, mb, gb. Just a number
#                               # indicates it's in bytes.
#   memcap-policy: ignore       # Can be "drop-flow", "pass-flow", "bypass",
#                               # "drop-packet", "pass-packet", "reject" or
#                               # "ignore" default is "ignore"
#   depth: 1mb                  # Can be specified in kb, mb, gb. Just a number
#                               # indicates it's in bytes.
#   toserver-chunk-size: 2560   # inspect raw stream in chunks of at least

@ -1305,10 +1569,14 @@ flow-timeouts:

#
stream:
  memcap: 64mb
  #memcap-policy: ignore
  checksum-validation: yes      # reject incorrect csums
  #midstream: false
  #midstream-policy: ignore
  inline: auto                  # auto will use inline mode in IPS mode, yes or no set it statically
  reassembly:
    memcap: 256mb
    #memcap-policy: ignore
    depth: 1mb                  # reassemble 1mb into a stream
    toserver-chunk-size: 2560
    toclient-chunk-size: 2560

@ -1359,6 +1627,9 @@ decoder:

    enabled: true
    ports: $GENEVE_PORTS # syntax: '[6081, 1234]' or '6081'.
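
Note: $GENEVE_PORTS is substituted at deploy time; the two concrete forms allowed by the syntax comment would be (values illustrative, 6081 is the standard Geneve port):

decoder:
  geneve:
    enabled: true
    ports: 6081            # single port
    # ports: [6081, 1234]  # or a list
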
  # maximum number of decoder layers for a packet
  # max-layers: 16

##
## Performance tuning and profiling
##

@ -1492,6 +1763,14 @@ threading:

  # thread will always be created.
  #
  detect-thread-ratio: 1.0
  #
  # By default, the per-thread stack size is left to its default setting. If
  # the default thread stack size is too small, use the following configuration
  # setting to change the size. Note that if any thread's stack size cannot be
  # set to this value, a fatal error occurs.
  #
  # Generally, the per-thread stack-size should not exceed 8MB.
  #stack-size: 8mb
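
Note: uncommented, the two knobs from this hunk sit together like this (1.0 is the shipped ratio; 8mb is the documented ceiling for the per-thread stack):

threading:
  detect-thread-ratio: 1.0
  stack-size: 8mb   # fatal error if a thread's stack cannot be set to this
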
# Luajit has a strange memory requirement, its 'states' need to be in the
# first 2G of the process' memory.

@ -1507,9 +1786,9 @@ luajit:

#
profiling:
  # Run profiling for every X-th packet. The default is 1, which means we
  # profile every packet. If set to 1000, one packet is profiled for every
  # 1000 received.
  #sample-rate: 1000
  # profile every packet. If set to 1024, one packet is profiled for every
  # 1024 received. The sample rate must be a power of 2.
  #sample-rate: 1024
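
Note: when packet profiling is actually enabled, the power-of-2 sampling from the new comment pairs with it like this; the packets: subsection and its filename are assumptions taken from the stock suricata.yaml, not from this hunk:

profiling:
  sample-rate: 1024            # profile one packet out of every 1024
  packets:                     # assumed stock subsection
    enabled: yes
    filename: packet_stats.log
    append: yes
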
  # rule profiling
  rules:

@ -1594,7 +1873,7 @@ profiling:

# accept the packet if Suricata is not able to keep pace.
# bypass mark and mask can be used to implement NFQ bypass. If bypass mark is
# set then the NFQ bypass is activated. Suricata will set the bypass mark/mask
# on packet of a flow that need to be bypassed. The Nefilter ruleset has to
# on packet of a flow that need to be bypassed. The Netfilter ruleset has to
# directly accept all packets of a flow once a packet has been marked.
nfq:
#  mode: accept

@ -1634,7 +1913,7 @@ capture:

  #disable-offloading: false
  #
  # disable checksum validation. Same as setting '-k none' on the
  # commandline.
  # command-line.
  #checksum-validation: none

# Netmap support

@ -1703,7 +1982,13 @@ pfring:

  cluster-id: 99

  # Default PF_RING cluster type. PF_RING can load balance per flow.
  # Possible values are cluster_flow or cluster_round_robin.
  # Possible values are:
  # - cluster_flow: 6-tuple: <src ip, src port, dst ip, dst port, proto, vlan>
  # - cluster_inner_flow: 6-tuple: <src ip, src port, dst ip, dst port, proto, vlan>
  # - cluster_inner_flow_2_tuple: 2-tuple: <src ip, dst ip>
  # - cluster_inner_flow_4_tuple: 4-tuple: <src ip, src port, dst ip, dst port>
  # - cluster_inner_flow_5_tuple: 5-tuple: <src ip, src port, dst ip, dst port, proto>
  # - cluster_round_robin (NOT RECOMMENDED)
  cluster-type: cluster_flow
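
Note: assembled into a capture definition, the PF_RING knobs above would read like this (the interface name is illustrative; threads: auto follows the stock file):

pfring:
  - interface: eth0              # hypothetical capture NIC
    threads: auto
    cluster-id: 99
    cluster-type: cluster_flow   # hash on the 6-tuple listed above
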
  # bpf filter for this interface

@ -1762,12 +2047,6 @@ ipfw:

napatech:
  # The Host Buffer Allowance for all streams
  # (-1 = OFF, 1 - 100 = percentage of the host buffer that can be held back)
  # This may be enabled when sharing streams with another application.
  # Otherwise, it should be turned off.
  #hba: -1

  # When use_all_streams is set to "yes" the initialization code will query
  # the Napatech service for all configured streams and listen on all of them.
  # When set to "no" the streams config array will be used.

@ -1863,6 +2142,7 @@ napatech:

##

default-rule-path: /var/lib/suricata/rules

rule-files:
  - suricata.rules

@ -1870,7 +2150,7 @@ rule-files:

## Auxiliary configuration files.
##

classification-file: /var/lib/suricata/rules/classification.config
classification-file: /etc/suricata/classification.config
reference-config-file: /etc/suricata/reference.config
# threshold-file: /etc/suricata/threshold.config

@ -1882,6 +2162,6 @@ reference-config-file: /etc/suricata/reference.config

# in this configuration file. Files with relative pathnames will be
# searched for in the same directory as this configuration file. You may
# use absolute pathnames too.
# You can specify more than 2 configuration files, if needed.
#include: include1.yaml
#include: include2.yaml
#include:
#  - include1.yaml
#  - include2.yaml
docker/suricata/dist/suricata_new.yaml (vendored, new file, 2167 lines): file diff suppressed because it is too large

docker/suricata/dist/suricata_old.yaml (vendored, new file, 1887 lines): file diff suppressed because it is too large
@ -19,4 +19,4 @@ services:

      - NET_RAW
    image: "dtagdevsec/suricata:alpha"
    volumes:
      - /data/suricata/log:/var/log/suricata
      - $HOME/tpotce/data/suricata/log:/var/log/suricata
docker/tpotinit/dist/bin/clean.sh (vendored, 2 lines changed)
@ -193,7 +193,7 @@ fuFATT () {

# Let's create a function to clean up and prepare glutton data
fuGLUTTON () {
  if [ "$myPERSISTENCE" != "on" ]; then rm -rf /data/glutton/*; fi
  mkdir -vp /data/glutton/log
  mkdir -vp /data/glutton/{log,payloads}
  chmod 770 /data/glutton -R
  chown tpot:tpot /data/glutton -R
}

@ -82,14 +82,6 @@ TPOT_ATTACKMAP_TEXT_TIMEZONE=UTC

# Some services / tools offer adjustments using ENVs which can be adjusted here.
###################################################################################

# SentryPeer P2P mode
# Exchange bad actor data via DHT / P2P mode by setting the ENV to true (1)
# In some cases (i.e. internally deployed T-Pots) this might be confusing as SentryPeer will show
# the bad actors in its logs. Therefore this option is opt-in based.
# 0: This is the default, P2P mode is disabled.
# 1: Enable P2P mode.
SENTRYPEER_PEER_TO_PEER=0

# Suricata ET Pro ruleset
# OPEN: This is the default and will use the ET Open ruleset
# OINKCODE: Replace OPEN with your Oinkcode to use the ET Pro ruleset