Finalize ELK 6.x Docker images

Continue working on ELK 6.x helper scripts
Clean up
Marco Ochse 2018-04-20 21:22:46 +00:00
parent fd40fc96a6
commit 89d31ffbe0
14 changed files with 56 additions and 759 deletions

View file

@@ -6,18 +6,11 @@ Thank you for your decision to contribute to T-Pot.
Please feel free to post your problems, ideas and issues [here](https://github.com/dtag-dev-sec/tpotce/issues). We will try to answer ASAP, but to speed things up we encourage you to ...
- [ ] Use the [search function](https://github.com/dtag-dev-sec/tpotce/issues?utf8=%E2%9C%93&q=) first
- [ ] Check the [FAQ](#faq)
- [ ] Check the FAQs in our [WIKI](https://github.com/dtag-dev-sec/tpotce/wiki)
- [ ] Provide [basic support information](#info) with regard to your issue
Thank you :smiley:
-
<a name="faq"></a>
### FAQ
##### Where can I find the honeypot logs?
###### The honeypot logs are located in `/data/`. You have to login via ssh and run `sudo su -` and then `cd /data/`. Do not change any permissions here or T-Pot will fail to work.
-
@@ -25,13 +18,13 @@ Thank you :smiley:
<a name="info"></a>
### Basic support information
- What T-Pot version are you currtently using?
- What T-Pot version are you currently using?
- Are you running on a Intel NUC or a VM?
- How long has your installation been running?
- Did you install any upgrades or packages?
- Did you modify any scripts?
- Have you turned persistence on/off?
- How much RAM available (login via ssh and run `htop`)?
- How much RAM is available (login via ssh and run `htop`)?
- How much stress are the CPUs under (login via ssh and run `htop`)?
- How much swap space is being used (login via ssh and run `htop`)?
- How much free disk space is available (login via ssh and run `sudo df -h`)?

View file

@@ -1,14 +1,6 @@
# T-Pot 17.10
# T-Pot 18.04
This repository contains the necessary files to create the **[T-Pot](https://github.com/dtag-dev-sec/tpotce/releases)** ISO image.
The image can then be used to install T-Pot on a physical or virtual machine.
In October 2016 we released
[T-Pot 16.10](http://dtag-dev-sec.github.io/mediator/feature/2016/10/31/t-pot-16.10.html)
# T-Pot 17.10
T-Pot 17.10 runs on the latest 16.04 LTS Ubuntu Server Network Installer image, is based on
T-Pot 18.04 runs on the latest 18.04 LTS Ubuntu Server Network Installer image, is based on
[docker](https://www.docker.com/), [docker-compose](https://docs.docker.com/compose/)

View file

@@ -16,6 +16,7 @@ fi
# Set vars
myDATE=$(date +%Y%m%d%H%M)
myINDEXCOUNT=$(curl -s -XGET ''$myKIBANA'api/saved_objects/index-pattern' | jq '.saved_objects[].attributes' | tr '\\' '\n' | grep "scripted" | wc -w)
myINDEXID=$(curl -s -XGET ''$myKIBANA'api/saved_objects/index-pattern' | jq '.saved_objects[].id' | tr -d '"')
myDASHBOARDS=$(curl -s -XGET ''$myKIBANA'api/saved_objects/dashboard?per_page=200' | jq '.saved_objects[].id' | tr -d '"')
myVISUALIZATIONS=$(curl -s -XGET ''$myKIBANA'api/saved_objects/visualization?per_page=200' | jq '.saved_objects[].id' | tr -d '"')
mySEARCHES=$(curl -s -XGET ''$myKIBANA'api/saved_objects/search?per_page=200' | jq '.saved_objects[].id' | tr -d '"')
@@ -31,7 +32,7 @@ trap fuCLEANUP EXIT
# Export index patterns
mkdir -p patterns
echo $myCOL1"### Now exporting"$myCOL0 $myINDEXCOUNT $myCOL1"index patterns." $myCOL0
curl -s -XGET ''$myKIBANA'api/saved_objects/index-pattern' | jq '.saved_objects[] | {attributes}' > patterns/index-patterns.json
curl -s -XGET ''$myKIBANA'api/saved_objects/index-pattern/'$myINDEXID'' | jq '. | {attributes}' > patterns/$myINDEXID.json &
echo
# Export dashboards
@@ -40,7 +41,7 @@ echo $myCOL1"### Now exporting"$myCOL0 $(echo $myDASHBOARDS | wc -w) $myCOL1"das
for i in $myDASHBOARDS;
do
echo $myCOL1"###### "$i $myCOL0
curl -s -XGET ''$myKIBANA'api/saved_objects/dashboard/'$i'' | jq '. | {attributes}' > dashboards/$i.json
curl -s -XGET ''$myKIBANA'api/saved_objects/dashboard/'$i'' | jq '. | {attributes}' > dashboards/$i.json &
done;
echo
@@ -50,7 +51,7 @@ echo $myCOL1"### Now exporting"$myCOL0 $(echo $myVISUALIZATIONS | wc -w) $myCOL1
for i in $myVISUALIZATIONS;
do
echo $myCOL1"###### "$i $myCOL0
curl -s -XGET ''$myKIBANA'api/saved_objects/visualization/'$i'' | jq '. | {attributes}' > visualizations/$i.json
curl -s -XGET ''$myKIBANA'api/saved_objects/visualization/'$i'' | jq '. | {attributes}' > visualizations/$i.json &
done;
echo
@@ -60,10 +61,13 @@ echo $myCOL1"### Now exporting"$myCOL0 $(echo $mySEARCHES | wc -w) $myCOL1"searc
for i in $mySEARCHES;
do
echo $myCOL1"###### "$i $myCOL0
curl -s -XGET ''$myKIBANA'api/saved_objects/search/'$i'' | jq '. | {attributes}' > searches/$i.json
curl -s -XGET ''$myKIBANA'api/saved_objects/search/'$i'' | jq '. | {attributes}' > searches/$i.json &
done;
echo
# Wait for background exports to finish
wait
# Building tar archive
echo $myCOL1"### Now building archive"$myCOL0 "kibana-objects_"$myDATE".tgz"
tar cvfz kibana-objects_$myDATE.tgz patterns dashboards visualizations searches > /dev/null
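
Note: the export helper now runs each saved-object export as a background job and only builds the archive once all of them have returned. A minimal sketch of that pattern, assuming a local Kibana 6.x answering on 127.0.0.1:64296 (as used in these scripts) and jq being available; the dashboard object type and the output paths are illustrative:

myKIBANA="http://127.0.0.1:64296/"
mkdir -p dashboards
# Collect the IDs of the dashboard saved objects (up to 200)
myDASHBOARDS=$(curl -s -XGET ''$myKIBANA'api/saved_objects/dashboard?per_page=200' | jq '.saved_objects[].id' | tr -d '"')
for i in $myDASHBOARDS;
do
  # each export runs as a background job ...
  curl -s -XGET ''$myKIBANA'api/saved_objects/dashboard/'$i'' | jq '. | {attributes}' > dashboards/$i.json &
done;
# ... and wait blocks until every background curl/jq pipeline has finished,
# so the archive only ever contains complete JSON files
wait
tar cfz kibana-objects.tgz dashboards

Backgrounding the per-object curl calls shortens the export considerably when there are many objects, while the wait keeps the tar step from racing the exports.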

View file

@@ -2,6 +2,7 @@
# Import Kibana objects
# Make sure ES is available
myES="http://127.0.0.1:64298/"
myKIBANA="http://127.0.0.1:64296/"
myESSTATUS=$(curl -s -XGET ''$myES'_cluster/health' | jq '.' | grep -c green)
if ! [ "$myESSTATUS" = "1" ]
then
@@ -41,10 +42,12 @@ fi
tar xvfz $myDUMP > /dev/null
# Restore index patterns
myINDEXCOUNT=$(cat patterns/index-patterns.json | tr '\\' '\n' | grep "scripted" | wc -w)
myINDEXID=$(ls patterns/*.json | cut -c 10- | rev | cut -c 6- | rev)
myINDEXCOUNT=$(cat patterns/$myINDEXID.json | tr '\\' '\n' | grep "scripted" | wc -w)
echo $myCOL1"### Now importing"$myCOL0 $myINDEXCOUNT $myCOL1"index patterns." $myCOL0
curl -s -XDELETE ''$myES'.kibana/index-pattern/logstash-*' > /dev/null
curl -s -XPUT ''$myES'.kibana/index-pattern/logstash-*' -T patterns/index-patterns.json > /dev/null
curl -s -XDELETE ''$myKIBANA'api/saved_objects/index-pattern/logstash-*' -H "Content-Type: application/json" -H "kbn-xsrf: true" > /dev/null
curl -s -XDELETE ''$myKIBANA'api/saved_objects/index-pattern/'$myINDEXID'' -H "Content-Type: application/json" -H "kbn-xsrf: true" > /dev/null
curl -s -XPOST ''$myKIBANA'api/saved_objects/index-pattern/'$myINDEXID'' -H "Content-Type: application/json" -H "kbn-xsrf: true" -d @patterns/$myINDEXID.json > /dev/null &
echo
# Restore dashboards
@@ -52,10 +55,15 @@ myDASHBOARDS=$(ls dashboards/*.json | cut -c 12- | rev | cut -c 6- | rev)
echo $myCOL1"### Now importing "$myCOL0$(echo $myDASHBOARDS | wc -w)$myCOL1 "dashboards." $myCOL0
for i in $myDASHBOARDS;
do
echo $myCOL1"###### "$i $myCOL0
curl -s -XDELETE ''$myES'.kibana/dashboard/'$i'' > /dev/null
curl -s -XPUT ''$myES'.kibana/dashboard/'$i'' -T dashboards/$i.json > /dev/null
curl -s -XDELETE ''$myKIBANA'api/saved_objects/dashboard/'$i'' -H "Content-Type: application/json" -H "kbn-xsrf: true" > /dev/null &
done;
wait
for i in $myDASHBOARDS;
do
echo $myCOL1"###### "$i $myCOL0
curl -s -XPOST ''$myKIBANA'api/saved_objects/dashboard/'$i'' -H "Content-Type: application/json" -H "kbn-xsrf: true" -d @dashboards/$i.json > /dev/null &
done;
wait
echo
# Restore visualizations
@@ -63,22 +71,32 @@ myVISUALIZATIONS=$(ls visualizations/*.json | cut -c 16- | rev | cut -c 6- | rev
echo $myCOL1"### Now importing "$myCOL0$(echo $myVISUALIZATIONS | wc -w)$myCOL1 "visualizations." $myCOL0
for i in $myVISUALIZATIONS;
do
echo $myCOL1"###### "$i $myCOL0
curl -s -XDELETE ''$myES'.kibana/visualization/'$i'' > /dev/null
curl -s -XPUT ''$myES'.kibana/visualization/'$i'' -T visualizations/$i.json > /dev/null
curl -s -XDELETE ''$myKIBANA'api/saved_objects/visualization/'$i'' -H "Content-Type: application/json" -H "kbn-xsrf: true" > /dev/null &
done;
wait
for i in $myVISUALIZATIONS;
do
echo $myCOL1"###### "$i $myCOL0
curl -s -XPOST ''$myKIBANA'api/saved_objects/visualization/'$i'' -H "Content-Type: application/json" -H "kbn-xsrf: true" -d @visualizations/$i.json > /dev/null &
done;
wait
echo
# Restore searches
mySEARCHES=$(ls searches/*.json | cut -c 10- | rev | cut -c 6- | rev)
echo $myCOL1"### Now importing "$myCOL0$(echo $mySEARCHES | wc -w)$myCOL1 "searches." $myCOL0
for i in $mySEARCHES;
do
curl -s -XDELETE ''$myKIBANA'api/saved_objects/search/'$i'' -H "Content-Type: application/json" -H "kbn-xsrf: true" > /dev/null &
done;
wait
for i in $mySEARCHES;
do
echo $myCOL1"###### "$i $myCOL0
curl -s -XDELETE ''$myES'.kibana/search/'$i'' > /dev/null
curl -s -XPUT ''$myES'.kibana/search/'$i'' -T searches/$i.json > /dev/null
curl -s -XPOST ''$myKIBANA'api/saved_objects/search/'$i'' -H "Content-Type: application/json" -H "kbn-xsrf: true" -d @searches/$i.json > /dev/null &
done;
echo
wait
# Stats
echo
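
Note: the restore path now talks to the Kibana saved objects API instead of writing into the .kibana index directly, deleting each object before re-creating it from its export. A minimal sketch of that delete-then-create pattern, assuming Kibana 6.x on 127.0.0.1:64296 and exports in the { "attributes": ... } shape produced by the export script; file names and locations are illustrative:

myKIBANA="http://127.0.0.1:64296/"
# Derive the object IDs from the exported file names (dashboards/<id>.json)
myDASHBOARDS=$(ls dashboards/*.json | sed 's!^dashboards/!!;s!\.json$!!')
# First pass: delete any existing objects; the kbn-xsrf header is required by the Kibana API
for i in $myDASHBOARDS;
do
  curl -s -XDELETE ''$myKIBANA'api/saved_objects/dashboard/'$i'' -H "Content-Type: application/json" -H "kbn-xsrf: true" > /dev/null &
done;
wait
# Second pass: re-create each object from its exported JSON
for i in $myDASHBOARDS;
do
  curl -s -XPOST ''$myKIBANA'api/saved_objects/dashboard/'$i'' -H "Content-Type: application/json" -H "kbn-xsrf: true" -d @dashboards/$i.json > /dev/null &
done;
wait

Finishing all deletes before starting the creates avoids a slow delete landing after its own object has already been re-created.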

View file

@@ -7,7 +7,6 @@ services:
# ELK services
## Elasticsearch service
elasticsearch:
build: .
container_name: elasticsearch
restart: always
environment:
@@ -31,7 +30,6 @@ services:
## Kibana service
kibana:
build: .
container_name: kibana
restart: always
stop_signal: SIGKILL
@@ -44,7 +42,6 @@ services:
## Logstash service
logstash:
build: .
container_name: logstash
restart: always
depends_on:
@@ -60,7 +57,6 @@ services:
## Elasticsearch-head service
head:
build: .
container_name: head
restart: always
depends_on:
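
Note: with the build: directives removed, docker-compose no longer builds the ELK images locally; it presumably pulls the prebuilt dtagdevsec/*:1804 images referenced elsewhere in these compose files. A minimal sketch of the resulting workflow, assuming the compose file lives at /opt/tpot/etc/tpot.yml (path illustrative):

# Fetch the prebuilt images from the registry, then start the stack detached
docker-compose -f /opt/tpot/etc/tpot.yml pull
docker-compose -f /opt/tpot/etc/tpot.yml up -d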

View file

@@ -1,39 +0,0 @@
FROM alpine
# Include dist
ADD dist/ /root/dist/
# Setup env and apt
RUN apk -U upgrade && \
apk add bash \
curl \
openjdk8-jre \
procps \
wget && \
# Get and install packages
cd /root/dist/ && \
mkdir -p /usr/share/elasticsearch/ && \
wget https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-5.6.8.tar.gz && \
tar xvfz elasticsearch-5.6.8.tar.gz --strip-components=1 -C /usr/share/elasticsearch/ && \
# Add and move files
cd /root/dist/ && \
mkdir -p /usr/share/elasticsearch/config && \
cp elasticsearch.yml /usr/share/elasticsearch/config/ && \
# Setup user, groups and configs
addgroup -g 2000 elasticsearch && \
adduser -S -H -s /bin/bash -u 2000 -D -g 2000 elasticsearch && \
chown -R elasticsearch:elasticsearch /usr/share/elasticsearch/ && \
# Clean up
apk del --purge wget && \
rm -rf /root/*
# Healthcheck
HEALTHCHECK --retries=10 CMD curl -s -XGET 'http://127.0.0.1:9200/_cat/health'
# Start ELK
USER elasticsearch:elasticsearch
CMD ["/usr/share/elasticsearch/bin/elasticsearch"]

View file

@@ -1,30 +0,0 @@
# T-Pot (Standard)
# For docker-compose ...
version: '2.2'
services:
# ELK services
## Elasticsearch service
elasticsearch:
build: .
container_name: elasticsearch
restart: always
environment:
- bootstrap.memory_lock=true
- "ES_JAVA_OPTS=-Xms1024m -Xmx1024m"
cap_add:
- IPC_LOCK
ulimits:
memlock:
soft: -1
hard: -1
nofile:
soft: 65536
hard: 65536
mem_limit: 2g
ports:
- "127.0.0.1:64298:9200"
image: "dtagdevsec/elasticsearch:1804"
volumes:
- /data:/data

View file

@@ -25,8 +25,8 @@ RUN apk -U upgrade && \
# Add and move files
cd /root/dist/ && \
cp test.svg /usr/share/kibana/src/ui/public/images/kibana.svg && \
cp test.svg /usr/share/kibana/src/ui/public/icons/kibana.svg && \
cp kibana.svg /usr/share/kibana/src/ui/public/images/kibana.svg && \
cp kibana.svg /usr/share/kibana/src/ui/public/icons/kibana.svg && \
cp elk.ico /usr/share/kibana/src/ui/public/assets/favicons/favicon.ico && \
cp elk.ico /usr/share/kibana/src/ui/public/assets/favicons/favicon-16x16.png && \
cp elk.ico /usr/share/kibana/src/ui/public/assets/favicons/favicon-32x32.png && \

[Image diff suppressed (SVG changed): before 7 KiB, after 5.8 KiB]

[Image diff suppressed (SVG removed): before 5.8 KiB]

View file

@@ -1,52 +0,0 @@
FROM alpine
# Include dist
ADD dist/ /root/dist/
# Setup env and apt
RUN apk -U upgrade && \
apk add bash \
curl \
git \
libc6-compat \
libzmq \
openjdk8-jre \
procps \
wget && \
# Get and install packages
git clone https://github.com/dtag-dev-sec/listbot /etc/listbot && \
cd /root/dist/ && \
mkdir -p /usr/share/logstash/ && \
wget https://artifacts.elastic.co/downloads/logstash/logstash-5.6.8.tar.gz && \
wget http://geolite.maxmind.com/download/geoip/database/GeoLite2-ASN.tar.gz && \
tar xvfz logstash-5.6.8.tar.gz --strip-components=1 -C /usr/share/logstash/ && \
/usr/share/logstash/bin/logstash-plugin install logstash-filter-translate && \
/usr/share/logstash/bin/logstash-plugin install logstash-output-syslog && \
tar xvfz GeoLite2-ASN.tar.gz --strip-components=1 -C /usr/share/logstash/vendor/bundle/jruby/1.9/gems/logstash-filter-geoip-4.3.1-java/vendor/ && \
# Add and move files
cd /root/dist/ && \
cp update.sh /usr/bin/ && \
chmod u+x /usr/bin/update.sh && \
mkdir -p /etc/logstash/conf.d && \
cp logstash.conf /etc/logstash/conf.d/ && \
cp elasticsearch-template-es5x.json /usr/share/logstash/vendor/bundle/jruby/1.9/gems/logstash-output-elasticsearch-7.4.2-java/lib/logstash/outputs/elasticsearch/ && \
# Setup user, groups and configs
addgroup -g 2000 logstash && \
adduser -S -H -s /bin/bash -u 2000 -D -g 2000 logstash && \
chown -R logstash:logstash /usr/share/logstash && \
chown -R logstash:logstash /etc/listbot && \
chmod 755 /usr/bin/update.sh && \
# Clean up
apk del --purge wget && \
rm -rf /root/*
# Healthcheck
HEALTHCHECK --retries=10 CMD curl -s -XGET 'http://127.0.0.1:9600'
# Start logstash
USER logstash:logstash
CMD update.sh && exec /usr/share/logstash/bin/logstash -f /etc/logstash/conf.d/logstash.conf

View file

@@ -1,52 +0,0 @@
FROM alpine
# Include dist
ADD dist/ /root/dist/
# Setup env and apt
RUN apk -U upgrade && \
apk add bash \
curl \
git \
libc6-compat \
libzmq \
openjdk8-jre \
procps \
wget && \
# Get and install packages
git clone https://github.com/dtag-dev-sec/listbot /etc/listbot && \
cd /root/dist/ && \
mkdir -p /usr/share/logstash/ && \
wget https://artifacts.elastic.co/downloads/logstash/logstash-6.2.4.tar.gz && \
wget http://geolite.maxmind.com/download/geoip/database/GeoLite2-ASN.tar.gz && \
tar xvfz logstash-6.2.4.tar.gz --strip-components=1 -C /usr/share/logstash/ && \
/usr/share/logstash/bin/logstash-plugin install logstash-filter-translate && \
/usr/share/logstash/bin/logstash-plugin install logstash-output-syslog && \
tar xvfz GeoLite2-ASN.tar.gz --strip-components=1 -C /usr/share/logstash/vendor/bundle/jruby/2.3.0/gems/logstash-filter-geoip-5.0.3-java/vendor/ && \
# Add and move files
cd /root/dist/ && \
cp update.sh /usr/bin/ && \
chmod u+x /usr/bin/update.sh && \
mkdir -p /etc/logstash/conf.d && \
cp logstash.conf /etc/logstash/conf.d/ && \
cp elasticsearch-template-es6x.json /usr/share/logstash/vendor/bundle/jruby/2.3.0/gems/logstash-output-elasticsearch-9.1.1-java/lib/logstash/outputs/elasticsearch/ && \
# Setup user, groups and configs
addgroup -g 2000 logstash && \
adduser -S -H -s /bin/bash -u 2000 -D -g 2000 logstash && \
chown -R logstash:logstash /usr/share/logstash && \
chown -R logstash:logstash /etc/listbot && \
chmod 755 /usr/bin/update.sh && \
# Clean up
apk del --purge wget && \
rm -rf /root/*
# Healthcheck
HEALTHCHECK --retries=10 CMD curl -s -XGET 'http://127.0.0.1:9600'
# Start logstash
#USER logstash:logstash
CMD update.sh && exec /usr/share/logstash/bin/logstash -f /etc/logstash/conf.d/logstash.conf

View file

@@ -1,449 +0,0 @@
# Input section
input {
# Suricata
file {
path => ["/data/suricata/log/eve.json"]
codec => json
type => "Suricata"
}
# P0f
file {
path => ["/data/p0f/log/p0f.json"]
codec => json
type => "P0f"
}
# Conpot
file {
path => ["/data/conpot/log/*.json"]
codec => json
type => "ConPot"
}
# Cowrie
file {
path => ["/data/cowrie/log/cowrie.json"]
codec => json
type => "Cowrie"
}
# Dionaea
file {
path => ["/data/dionaea/log/dionaea.json"]
codec => json
type => "Dionaea"
}
# Elasticpot
file {
path => ["/data/elasticpot/log/elasticpot.log"]
codec => json
type => "ElasticPot"
}
# eMobility
file {
path => ["/data/emobility/log/centralsystemEWS.log"]
type => "eMobility"
}
# Glastopf
file {
path => ["/data/glastopf/log/glastopf.log"]
type => "Glastopf"
}
# Glutton
file {
path => ["/data/glutton/log/glutton.log"]
codec => json
type => "Glutton"
}
# Heralding
file {
path => ["/data/heralding/log/auth.csv"]
type => "Heralding"
}
# Honeytrap
file {
path => ["/data/honeytrap/log/attackers.json"]
codec => json
type => "Honeytrap"
}
# Mailoney
file {
path => ["/data/mailoney/log/commands.log"]
type => "Mailoney"
}
# Rdpy
file {
path => ["/data/rdpy/log/rdpy.log"]
type => "Rdpy"
}
# Host Syslog
file {
path => ["/data/host/log/auth.log"]
codec => plain
type => "Syslog"
}
# Host NGINX
file {
path => ["/data/host/log/nginx/access.log"]
codec => json
type => "NGINX"
}
# Vnclowpot
file {
path => ["/data/vnclowpot/log/vnclowpot.log"]
type => "Vnclowpot"
}
}
# Filter Section
filter {
# Suricata
if [type] == "Suricata" {
date {
match => [ "timestamp", "ISO8601" ]
}
translate {
refresh_interval => 86400
field => "[alert][signature_id]"
destination => "[alert][cve_id]"
dictionary_path => "/etc/listbot/cve.yaml"
}
}
# P0f
if [type] == "P0f" {
date {
match => [ "timestamp", "yyyy'/'MM'/'dd HH:mm:ss" ]
remove_field => ["timestamp"]
}
mutate {
rename => {
"server_port" => "dest_port"
"server_ip" => "dest_ip"
"client_port" => "src_port"
"client_ip" => "src_ip"
}
}
}
# Conpot
if [type] == "ConPot" {
date {
match => [ "timestamp", "ISO8601" ]
}
}
# Cowrie
if [type] == "Cowrie" {
date {
match => [ "timestamp", "ISO8601" ]
}
mutate {
rename => {
"dst_port" => "dest_port"
"dst_ip" => "dest_ip"
}
}
}
# Dionaea
if [type] == "Dionaea" {
date {
match => [ "timestamp", "ISO8601" ]
}
mutate {
rename => {
"dst_port" => "dest_port"
"dst_ip" => "dest_ip"
}
gsub => [
"src_ip", "::ffff:", "",
"dest_ip", "::ffff:", ""
]
}
if [credentials] {
mutate {
add_field => {
"login.username" => "%{[credentials][username]}"
"login.password" => "%{[credentials][password]}"
}
remove_field => "[credentials]"
}
}
}
# ElasticPot
if [type] == "ElasticPot" {
date {
match => [ "timestamp", "ISO8601" ]
}
}
# eMobility
if [type] == "eMobility" {
grok {
match => [ "message", "\A%{IP:src_ip}\.%{POSINT:src_port:integer}\|%{IP:dest_ip}\.%{POSINT:dest_port:integer}:%{SPACE}%{SYSLOG5424PRINTASCII}%{SPACE}%{SYSLOG5424PRINTASCII}%{SPACE}%{SYSLOG5424PRINTASCII}%{SPACE}%{SYSLOG5424PRINTASCII}%{SPACE}%{SYSLOG5424PRINTASCII}%{SPACE}%{SYSLOG5424PRINTASCII}%{SPACE}%{SYSLOG5424PRINTASCII}%{SPACE}%{SYSLOG5424PRINTASCII}%{SPACE}%{SYSLOG5424PRINTASCII}%{SPACE}%{SYSLOG5424SD}%{SYSLOG5424PRINTASCII}%{SPACE}%{SYSLOG5424PRINTASCII}%{SPACE}%{SYSLOG5424PRINTASCII}%{SPACE}%{URIPROTO:http_method}\|%{URIPATH:http_uri}\|%{TIMESTAMP_ISO8601:timestamp}" ]
}
date {
match => [ "timestamp", "ISO8601" ]
}
}
# Glastopf
if [type] == "Glastopf" {
grok {
match => [ "message", "\A%{TIMESTAMP_ISO8601:timestamp}%{SPACE}%{NOTSPACE}%{SPACE}%{IP:src_ip}%{SPACE}%{WORD}%{SPACE}%{URIPROTO:http_method}%{SPACE}%{NOTSPACE:http_uri}%{SPACE}%{NOTSPACE}%{SPACE}%{HOSTNAME}:%{NUMBER:dest_port:integer}" ]
}
date {
match => [ "timestamp", "yyyy-MM-dd HH:mm:ss,SSS" ]
remove_field => ["timestamp"]
}
}
# Glutton
if [type] == "Glutton" {
date {
match => [ "ts", "UNIX" ]
remove_field => ["ts"]
}
}
# Heralding
if [type] == "Heralding" {
csv {
columns => ["timestamp","auth_id","session_id","src_ip","src_port","dest_ip","dest_port","proto","username","password"] separator => ","
}
date {
match => [ "timestamp", "yyyy-MM-dd HH:mm:ss.SSSSSS" ]
remove_field => ["timestamp"]
}
}
# Honeytrap
if [type] == "Honeytrap" {
date {
match => [ "timestamp", "ISO8601" ]
}
mutate {
rename => {
"[attack_connection][local_port]" => "dest_port"
"[attack_connection][local_ip]" => "dest_ip"
"[attack_connection][remote_port]" => "src_port"
"[attack_connection][remote_ip]" => "src_ip"
}
}
}
# Mailoney
if [type] == "Mailoney" {
grok {
match => [ "message", "\A%{NAGIOSTIME}\[%{IPV4:src_ip}:%{INT:src_port:integer}] %{GREEDYDATA:smtp_input}" ]
}
mutate {
add_field => {
"dest_port" => "25"
}
}
date {
match => [ "nagios_epoch", "UNIX" ]
remove_field => ["nagios_epoch"]
}
}
# Rdpy
if [type] == "Rdpy" {
grok { match => { "message" => [ "\A%{TIMESTAMP_ISO8601:timestamp},domain:%{CISCO_REASON:domain},username:%{CISCO_REASON:username},password:%{CISCO_REASON:password},hostname:%{GREEDYDATA:hostname}", "\A%{TIMESTAMP_ISO8601:timestamp},Connection from %{IPV4:src_ip}:%{INT:src_port:integer}" ] } }
date {
match => [ "timestamp", "ISO8601" ]
remove_field => ["timestamp"]
}
mutate {
add_field => {
"dest_port" => "3389"
}
}
}
# Syslog
if [type] == "Syslog" {
grok {
match => {
"message" => ["%{SYSLOGPAMSESSION}", "%{CRONLOG}", "%{SYSLOGLINE}"]
}
overwrite => "message"
}
date {
match => [ "timestamp", "MMM d HH:mm:ss", "MMM dd HH:mm:ss" ]
remove_field => ["timestamp"]
}
date {
match => ["timestamp8601", "ISO8601"]
remove_field => ["timestamp8601"]
}
grok {
match => { "message" => "Connection closed by %{IP:src_ip}" }
add_tag => [ "ssh_connection_closed" ]
tag_on_failure => []
}
grok {
match => { "message" => "Received disconnect from %{IP:src_ip}" }
add_tag => [ "ssh_connection_disconnect" ]
tag_on_failure => []
}
grok {
match => { "message" => "Failed password for invalid user %{USERNAME:username} from %{IP:src_ip} port %{BASE10NUM:port} ssh2" }
add_tag => [ "ssh_failed_password" ]
tag_on_failure => []
}
grok {
match => { "message" => "Did not receive identification string from %{IP:src_ip}" }
add_tag => [ "ssh_no_id" ]
tag_on_failure => []
}
grok {
match => { "message" => "User %{USERNAME:username} from %{IP:src_ip} not allowed because not listed in AllowUsers" }
add_tag => [ "ssh_user_not_allowed" ]
tag_on_failure => []
}
grok {
match => { "message" => "authentication failure; logname=%{USERNAME:logname} uid=%{BASE10NUM:uid} euid=%{BASE10NUM:euid} tty=%{TTY:tty} ruser=%{USERNAME:ruser} rhost=(?:%{HOSTNAME:remote_host}|\s*) user=%{USERNAME:user}"}
add_tag => [ "ssh_auth_failure" ]
tag_on_failure => []
}
grok {
match => { "message" => "pam_unix\(sshd:auth\): authentication failure; logname= uid=0 euid=0 tty=%{NOTSPACE:tty} ruser= rhost=(?:%{HOSTNAME:remote_host}|\s*) user=%{USERNAME:user}"}
add_tag => [ "ssh_auth_failure" ]
tag_on_failure => []
}
grok {
match => { "message" => "Failed password for %{USERNAME:username} from %{IP:src_ip} port %{BASE10NUM:port} ssh2"}
add_tag => [ "ssh_failed_password" ]
tag_on_failure => []
}
grok {
match => { "message" => "Accepted password for %{USERNAME:username} from %{IP:src_ip} port %{BASE10NUM:port} ssh2"}
add_tag => [ "ssh_accepted_password" ]
tag_on_failure => []
}
grok {
match => { "message" => "Accepted publickey for %{USERNAME:username} from %{IP:src_ip} port %{BASE10NUM:port} ssh2"}
add_tag => [ "ssh_accepted_pubkey" ]
tag_on_failure => []
}
grok {
match => { "message" => "Accepted keyboard-interactive/pam for %{USERNAME:username} from %{IP:src_ip} port %{BASE10NUM:port} ssh2"}
add_tag => [ "ssh_accepted_interactive" ]
tag_on_failure => []
}
}
# NGINX
if [type] == "NGINX" {
date {
match => [ "timestamp", "ISO8601" ]
}
}
# Vnclowpot
if [type] == "Vnclowpot" {
grok {
match => [ "message", "\A%{NOTSPACE}%{SPACE}%{TIME}%{SPACE}%{IPV4:src_ip}:%{INT:src_port}%{SPACE}%{NOTSPACE:vnc_handshake}" ]
}
date {
match => [ "timestamp", "yyyy/MM/dd HH:mm:ss" ]
remove_field => ["timestamp"]
}
mutate {
add_field => {
"dest_port" => "5900"
}
}
}
# Drop if parse fails
if "_grokparsefailure" in [tags] { drop {} }
# Add geo coordinates / ASN info / IP rep.
if [src_ip] {
geoip {
cache_size => 10000
source => "src_ip"
database => "/usr/share/logstash/vendor/bundle/jruby/2.3.0/gems/logstash-filter-geoip-5.0.3-java/vendor/GeoLite2-City.mmdb"
}
geoip {
cache_size => 10000
source => "src_ip"
database => "/usr/share/logstash/vendor/bundle/jruby/2.3.0/gems/logstash-filter-geoip-5.0.3-java/vendor/GeoLite2-ASN.mmdb"
}
translate {
refresh_interval => 86400
field => "src_ip"
destination => "ip_rep"
dictionary_path => "/etc/listbot/iprep.yaml"
}
}
# In some rare conditions dest_port, src_port is indexed as string, forcing integer for now
if [dest_port] {
mutate {
convert => { "dest_port" => "integer" }
}
}
if [src_port] {
mutate {
convert => { "src_port" => "integer" }
}
}
# Add T-Pot hostname and external IP
if [type] == "ConPot" or [type] == "Cowrie" or [type] == "Dionaea" or [type] == "ElasticPot" or [type] == "eMobility" or [type] == "Glastopf" or [type] == "Honeytrap" or [type] == "Heralding" or [type] == "Mailoney" or [type] == "Rdpy" or [type] == "Suricata" or [type] == "Vnclowpot" {
mutate {
add_field => {
"t-pot_ip_ext" => "${MY_EXTIP}"
"t-pot_ip_int" => "${MY_INTIP}"
"t-pot_hostname" => "${MY_HOSTNAME}"
}
}
}
}
# Output section
output {
elasticsearch {
path => "/usr/share/logstash/vendor/bundle/jruby/2.3.0/gems/logstash-output-elasticsearch-9.1.1-java/lib/logstash/outputs/elasticsearch/elasticsearch-template-es6x.json"
hosts => ["elasticsearch:9200"]
}
if [type] == "Suricata" {
file {
file_mode => 0760
path => "/data/suricata/log/suricata_ews.log"
}
}
# Debug output
#if [type] == "XYZ" {
# stdout {
# codec => rubydebug
# }
#}
# Debug output
#stdout {
# codec => rubydebug
#}
}

View file

@ -1,20 +0,0 @@
# T-Pot (Standard)
# For docker-compose ...
version: '2.2'
services:
## Logstash service
logstash:
build: .
container_name: logstash
restart: always
# depends_on:
# elasticsearch:
# condition: service_healthy
env_file:
- /opt/tpot/etc/compose/elk_environment
image: "dtagdevsec/logstash:1804"
volumes:
- /data:/data
- /var/log:/data/host/log