mirror of
https://github.com/telekom-security/tpotce.git
synced 2025-05-11 02:46:04 +00:00
update scripts to new file layout
iso will not include tpot repo tpot repo will be cloned during install as all the other tools
This commit is contained in:
parent
b5a4ef948f
commit
1347eac88f
43 changed files with 12923 additions and 13 deletions
bin
backup_es_folders.shclean.shdps.shdump_es.shexport_kibana-objects.shimport_kibana-objects.shmyip.shrestore_es.shupdateip.sh
etc
compose
curator
logrotate
objects
host
iso
makeiso.sh
38
bin/backup_es_folders.sh
Executable file
38
bin/backup_es_folders.sh
Executable file
|
@ -0,0 +1,38 @@
|
|||
#!/bin/bash
|
||||
# Backup all ES relevant folders
|
||||
# Make sure ES is available
|
||||
myES="http://127.0.0.1:64298/"
|
||||
myESSTATUS=$(curl -s -XGET ''$myES'_cluster/health' | jq '.' | grep -c green)
|
||||
if ! [ "$myESSTATUS" = "1" ]
|
||||
then
|
||||
echo "### Elasticsearch is not available, try starting via 'systemctl start elk'."
|
||||
exit
|
||||
else
|
||||
echo "### Elasticsearch is available, now continuing."
|
||||
echo
|
||||
fi
|
||||
|
||||
# Set vars
|
||||
myCOUNT=1
|
||||
myDATE=$(date +%Y%m%d%H%M)
|
||||
myELKPATH="/data/elk/data"
|
||||
myKIBANAINDEXNAME=$(curl -s -XGET ''$myES'_cat/indices/' | grep .kibana | awk '{ print $4 }')
|
||||
myKIBANAINDEXPATH=$myELKPATH/nodes/0/indices/$myKIBANAINDEXNAME
|
||||
|
||||
# Let's ensure normal operation on exit or if interrupted ...
|
||||
function fuCLEANUP {
|
||||
### Start ELK
|
||||
systemctl start tpot
|
||||
echo "### Now starting T-Pot ..."
|
||||
}
|
||||
trap fuCLEANUP EXIT
|
||||
|
||||
# Stop T-Pot to lift db lock
|
||||
echo "### Now stopping T-Pot"
|
||||
systemctl stop tpot
|
||||
sleep 2
|
||||
|
||||
# Backup DB in 2 flavors
|
||||
echo "### Now backing up Elasticsearch folders ..."
|
||||
tar cvfz "elkall_"$myDATE".tgz" $myELKPATH
|
||||
tar cvfz "elkbase_"$myDATE".tgz" $myKIBANAINDEXPATH
|
219
bin/clean.sh
Executable file
219
bin/clean.sh
Executable file
|
@ -0,0 +1,219 @@
|
|||
#!/bin/bash
|
||||
# T-Pot Container Data Cleaner & Log Rotator
|
||||
|
||||
# Set colors
|
||||
myRED="[0;31m"
|
||||
myGREEN="[0;32m"
|
||||
myWHITE="[0;0m"
|
||||
|
||||
# Set persistence
|
||||
myPERSISTENCE=$1
|
||||
|
||||
# Let's create a function to check if folder is empty
|
||||
fuEMPTY () {
|
||||
local myFOLDER=$1
|
||||
|
||||
echo $(ls $myFOLDER | wc -l)
|
||||
}
|
||||
|
||||
# Let's create a function to rotate and compress logs
|
||||
fuLOGROTATE () {
|
||||
local mySTATUS="/opt/tpot/etc/logrotate/status"
|
||||
local myCONF="/opt/tpot/etc/logrotate/logrotate.conf"
|
||||
local myCOWRIETTYLOGS="/data/cowrie/log/tty/"
|
||||
local myCOWRIETTYTGZ="/data/cowrie/log/ttylogs.tgz"
|
||||
local myCOWRIEDL="/data/cowrie/downloads/"
|
||||
local myCOWRIEDLTGZ="/data/cowrie/downloads.tgz"
|
||||
local myDIONAEABI="/data/dionaea/bistreams/"
|
||||
local myDIONAEABITGZ="/data/dionaea/bistreams.tgz"
|
||||
local myDIONAEABIN="/data/dionaea/binaries/"
|
||||
local myDIONAEABINTGZ="/data/dionaea/binaries.tgz"
|
||||
local myHONEYTRAPATTACKS="/data/honeytrap/attacks/"
|
||||
local myHONEYTRAPATTACKSTGZ="/data/honeytrap/attacks.tgz"
|
||||
local myHONEYTRAPDL="/data/honeytrap/downloads/"
|
||||
local myHONEYTRAPDLTGZ="/data/honeytrap/downloads.tgz"
|
||||
|
||||
# Ensure correct permissions and ownerships for logrotate to run without issues
|
||||
chmod 760 /data/ -R
|
||||
chown tpot:tpot /data -R
|
||||
|
||||
# Run logrotate with force (-f) first, so the status file can be written and race conditions (with tar) be avoided
|
||||
logrotate -f -s $mySTATUS $myCONF
|
||||
|
||||
# Compressing some folders first and rotate them later
|
||||
if [ "$(fuEMPTY $myCOWRIETTYLOGS)" != "0" ]; then tar cvfz $myCOWRIETTYTGZ $myCOWRIETTYLOGS; fi
|
||||
if [ "$(fuEMPTY $myCOWRIEDL)" != "0" ]; then tar cvfz $myCOWRIEDLTGZ $myCOWRIEDL; fi
|
||||
if [ "$(fuEMPTY $myDIONAEABI)" != "0" ]; then tar cvfz $myDIONAEABITGZ $myDIONAEABI; fi
|
||||
if [ "$(fuEMPTY $myDIONAEABIN)" != "0" ]; then tar cvfz $myDIONAEABINTGZ $myDIONAEABIN; fi
|
||||
if [ "$(fuEMPTY $myHONEYTRAPATTACKS)" != "0" ]; then tar cvfz $myHONEYTRAPATTACKSTGZ $myHONEYTRAPATTACKS; fi
|
||||
if [ "$(fuEMPTY $myHONEYTRAPDL)" != "0" ]; then tar cvfz $myHONEYTRAPDLTGZ $myHONEYTRAPDL; fi
|
||||
|
||||
# Ensure correct permissions and ownership for previously created archives
|
||||
chmod 760 $myCOWRIETTYTGZ $myCOWRIEDLTGZ $myDIONAEABITGZ $myDIONAEABINTGZ $myHONEYTRAPATTACKSTGZ $myHONEYTRAPDLTGZ
|
||||
chown tpot:tpot $myCOWRIETTYTGZ $myCOWRIEDLTGZ $myDIONAEABITGZ $myDIONAEABINTGZ $myHONEYTRAPATTACKSTGZ $myHONEYTRAPDLTGZ
|
||||
|
||||
# Need to remove subfolders since too many files cause rm to exit with errors
|
||||
rm -rf $myCOWRIETTYLOGS $myCOWRIEDL $myDIONAEABI $myDIONAEABIN $myHONEYTRAPATTACKS $myHONEYTRAPDL
|
||||
|
||||
# Recreate subfolders with correct permissions and ownership
|
||||
mkdir -p $myCOWRIETTYLOGS $myCOWRIEDL $myDIONAEABI $myDIONAEABIN $myHONEYTRAPATTACKS $myHONEYTRAPDL
|
||||
chmod 760 $myCOWRIETTYLOGS $myCOWRIEDL $myDIONAEABI $myDIONAEABIN $myHONEYTRAPATTACKS $myHONEYTRAPDL
|
||||
chown tpot:tpot $myCOWRIETTYLOGS $myCOWRIEDL $myDIONAEABI $myDIONAEABIN $myHONEYTRAPATTACKS $myHONEYTRAPDL
|
||||
|
||||
# Run logrotate again to account for previously created archives - DO NOT FORCE HERE!
|
||||
logrotate -s $mySTATUS $myCONF
|
||||
}
|
||||
|
||||
# Let's create a function to clean up and prepare conpot data
|
||||
fuCONPOT () {
|
||||
if [ "$myPERSISTENCE" != "on" ]; then rm -rf /data/conpot/*; fi
|
||||
mkdir -p /data/conpot/log
|
||||
chmod 760 /data/conpot -R
|
||||
chown tpot:tpot /data/conpot -R
|
||||
}
|
||||
|
||||
# Let's create a function to clean up and prepare cowrie data
|
||||
fuCOWRIE () {
|
||||
if [ "$myPERSISTENCE" != "on" ]; then rm -rf /data/cowrie/*; fi
|
||||
mkdir -p /data/cowrie/log/tty/ /data/cowrie/downloads/ /data/cowrie/keys/ /data/cowrie/misc/
|
||||
chmod 760 /data/cowrie -R
|
||||
chown tpot:tpot /data/cowrie -R
|
||||
}
|
||||
|
||||
# Let's create a function to clean up and prepare dionaea data
|
||||
fuDIONAEA () {
|
||||
if [ "$myPERSISTENCE" != "on" ]; then rm -rf /data/dionaea/*; fi
|
||||
mkdir -p /data/dionaea/log /data/dionaea/bistreams /data/dionaea/binaries /data/dionaea/rtp /data/dionaea/roots/ftp /data/dionaea/roots/tftp /data/dionaea/roots/www /data/dionaea/roots/upnp
|
||||
chmod 760 /data/dionaea -R
|
||||
chown tpot:tpot /data/dionaea -R
|
||||
}
|
||||
|
||||
# Let's create a function to clean up and prepare elasticpot data
|
||||
fuELASTICPOT () {
|
||||
if [ "$myPERSISTENCE" != "on" ]; then rm -rf /data/elasticpot/*; fi
|
||||
mkdir -p /data/elasticpot/log
|
||||
chmod 760 /data/elasticpot -R
|
||||
chown tpot:tpot /data/elasticpot -R
|
||||
}
|
||||
|
||||
# Let's create a function to clean up and prepare elk data
|
||||
fuELK () {
|
||||
# ELK data will be kept for <= 90 days, check /etc/crontab for curator modification
|
||||
# ELK daemon log files will be removed
|
||||
if [ "$myPERSISTENCE" != "on" ]; then rm -rf /data/elk/log/*; fi
|
||||
mkdir -p /data/elk
|
||||
chmod 760 /data/elk -R
|
||||
chown tpot:tpot /data/elk -R
|
||||
}
|
||||
|
||||
# Let's create a function to clean up and prepare emobility data
|
||||
fuEMOBILITY () {
|
||||
if [ "$myPERSISTENCE" != "on" ]; then rm -rf /data/emobility/*; fi
|
||||
mkdir -p /data/emobility/log
|
||||
chmod 760 /data/emobility -R
|
||||
chown tpot:tpot /data/emobility -R
|
||||
}
|
||||
|
||||
# Let's create a function to clean up and prepare glastopf data
|
||||
fuGLASTOPF () {
|
||||
if [ "$myPERSISTENCE" != "on" ]; then rm -rf /data/glastopf/*; fi
|
||||
mkdir -p /data/glastopf
|
||||
chmod 760 /data/glastopf -R
|
||||
chown tpot:tpot /data/glastopf -R
|
||||
}
|
||||
|
||||
# Let's create a function to clean up and prepare honeytrap data
|
||||
fuHONEYTRAP () {
|
||||
if [ "$myPERSISTENCE" != "on" ]; then rm -rf /data/honeytrap/*; fi
|
||||
mkdir -p /data/honeytrap/log/ /data/honeytrap/attacks/ /data/honeytrap/downloads/
|
||||
chmod 760 /data/honeytrap/ -R
|
||||
chown tpot:tpot /data/honeytrap/ -R
|
||||
}
|
||||
|
||||
# Let's create a function to clean up and prepare mailoney data
|
||||
fuMAILONEY () {
|
||||
if [ "$myPERSISTENCE" != "on" ]; then rm -rf /data/mailoney/*; fi
|
||||
mkdir -p /data/mailoney/log/
|
||||
chmod 760 /data/mailoney/ -R
|
||||
chown tpot:tpot /data/mailoney/ -R
|
||||
}
|
||||
|
||||
# Let's create a function to clean up and prepare rdpy data
|
||||
fuRDPY () {
|
||||
if [ "$myPERSISTENCE" != "on" ]; then rm -rf /data/rdpy/*; fi
|
||||
mkdir -p /data/rdpy/log/
|
||||
chmod 760 /data/rdpy/ -R
|
||||
chown tpot:tpot /data/rdpy/ -R
|
||||
}
|
||||
|
||||
# Let's create a function to prepare spiderfoot db
|
||||
fuSPIDERFOOT () {
|
||||
mkdir -p /data/spiderfoot
|
||||
touch /data/spiderfoot/spiderfoot.db
|
||||
chmod 760 -R /data/spiderfoot
|
||||
chown tpot:tpot -R /data/spiderfoot
|
||||
}
|
||||
|
||||
# Let's create a function to clean up and prepare suricata data
|
||||
fuSURICATA () {
|
||||
if [ "$myPERSISTENCE" != "on" ]; then rm -rf /data/suricata/*; fi
|
||||
mkdir -p /data/suricata/log
|
||||
chmod 760 -R /data/suricata
|
||||
chown tpot:tpot -R /data/suricata
|
||||
}
|
||||
|
||||
# Let's create a function to clean up and prepare p0f data
|
||||
fuP0F () {
|
||||
if [ "$myPERSISTENCE" != "on" ]; then rm -rf /data/p0f/*; fi
|
||||
mkdir -p /data/p0f/log
|
||||
chmod 760 -R /data/p0f
|
||||
chown tpot:tpot -R /data/p0f
|
||||
}
|
||||
|
||||
# Let's create a function to clean up and prepare vnclowpot data
|
||||
fuVNCLOWPOT () {
|
||||
if [ "$myPERSISTENCE" != "on" ]; then rm -rf /data/vnclowpot/*; fi
|
||||
mkdir -p /data/vnclowpot/log/
|
||||
chmod 760 /data/vnclowpot/ -R
|
||||
chown tpot:tpot /data/vnclowpot/ -R
|
||||
}
|
||||
|
||||
|
||||
# Avoid unwanted cleaning
|
||||
if [ "$myPERSISTENCE" = "" ];
|
||||
then
|
||||
echo $myRED"!!! WARNING !!! - This will delete ALL honeypot logs. "$myWHITE
|
||||
while [ "$myQST" != "y" ] && [ "$myQST" != "n" ];
|
||||
do
|
||||
read -p "Continue? (y/n) " myQST
|
||||
done
|
||||
if [ "$myQST" = "n" ];
|
||||
then
|
||||
echo $myGREEN"Puuh! That was close! Aborting!"$myWHITE
|
||||
exit
|
||||
fi
|
||||
fi
|
||||
|
||||
# Check persistence, if enabled compress and rotate logs
|
||||
if [ "$myPERSISTENCE" = "on" ];
|
||||
then
|
||||
echo "Persistence enabled, now rotating and compressing logs."
|
||||
fuLOGROTATE
|
||||
else
|
||||
echo "Cleaning up and preparing data folders."
|
||||
fuCONPOT
|
||||
fuCOWRIE
|
||||
fuDIONAEA
|
||||
fuELASTICPOT
|
||||
fuELK
|
||||
fuEMOBILITY
|
||||
fuGLASTOPF
|
||||
fuHONEYTRAP
|
||||
fuMAILONEY
|
||||
fuRDPY
|
||||
fuSPIDERFOOT
|
||||
fuSURICATA
|
||||
fuP0F
|
||||
fuVNCLOWPOT
|
||||
fi
|
||||
|
71
bin/dps.sh
Executable file
71
bin/dps.sh
Executable file
|
@ -0,0 +1,71 @@
|
|||
#/bin/bash
|
||||
# Show current status of all running containers
|
||||
myPARAM="$1"
|
||||
myIMAGES="$(cat /opt/tpot/etc/tpot.yml | grep -v '#' | grep container_name | cut -d: -f2)"
|
||||
myRED="[1;31m"
|
||||
myGREEN="[1;32m"
|
||||
myBLUE="[1;34m"
|
||||
myWHITE="[0;0m"
|
||||
myMAGENTA="[1;35m"
|
||||
|
||||
function fuCONTAINERSTATUS {
|
||||
local myNAME="$1"
|
||||
local mySTATUS="$(/usr/bin/docker ps -f name=$myNAME --format "table {{.Status}}" -f status=running -f status=exited | tail -n 1)"
|
||||
myDOWN="$(echo "$mySTATUS" | grep -o -E "(STATUS|NAMES|Exited)")"
|
||||
|
||||
case "$myDOWN" in
|
||||
STATUS)
|
||||
mySTATUS="$myRED"DOWN"$myWHITE"
|
||||
;;
|
||||
NAMES)
|
||||
mySTATUS="$myRED"DOWN"$myWHITE"
|
||||
;;
|
||||
Exited)
|
||||
mySTATUS="$myRED$mySTATUS$myWHITE"
|
||||
;;
|
||||
*)
|
||||
mySTATUS="$myGREEN$mySTATUS$myWHITE"
|
||||
;;
|
||||
esac
|
||||
|
||||
printf "$mySTATUS"
|
||||
}
|
||||
|
||||
function fuCONTAINERPORTS {
|
||||
local myNAME="$1"
|
||||
local myPORTS="$(/usr/bin/docker ps -f name=$myNAME --format "table {{.Ports}}" -f status=running -f status=exited | tail -n 1 | sed s/","/",\n\t\t\t\t\t\t\t"/g)"
|
||||
|
||||
if [ "$myPORTS" != "PORTS" ];
|
||||
then
|
||||
printf "$myBLUE$myPORTS$myWHITE"
|
||||
fi
|
||||
}
|
||||
|
||||
function fuGETSYS {
|
||||
printf "========| System |========\n"
|
||||
printf "%+10s %-20s\n" "Date: " "$(date)"
|
||||
printf "%+10s %-20s\n" "Uptime: " "$(uptime | cut -b 2-)"
|
||||
printf "%+10s %-20s\n" "CPU temp: " "$(sensors | grep 'Physical' | awk '{ print $4" " }' | tr -d [:cntrl:])"
|
||||
echo
|
||||
}
|
||||
|
||||
while true
|
||||
do
|
||||
fuGETSYS
|
||||
printf "%-19s %-36s %s\n" "NAME" "STATUS" "PORTS"
|
||||
for i in $myIMAGES; do
|
||||
myNAME="$myMAGENTA$i$myWHITE"
|
||||
printf "%-32s %-49s %s" "$myNAME" "$(fuCONTAINERSTATUS $i)" "$(fuCONTAINERPORTS $i)"
|
||||
echo
|
||||
if [ "$myPARAM" = "vv" ];
|
||||
then
|
||||
/usr/bin/docker exec -t "$i" /bin/ps awfuwfxwf | egrep -v -E "awfuwfxwf|/bin/ps"
|
||||
fi
|
||||
done
|
||||
if [[ $myPARAM =~ ^([1-9]|[1-9][0-9]|[1-9][0-9][0-9])$ ]];
|
||||
then
|
||||
sleep "$myPARAM"
|
||||
else
|
||||
break
|
||||
fi
|
||||
done
|
45
bin/dump_es.sh
Executable file
45
bin/dump_es.sh
Executable file
|
@ -0,0 +1,45 @@
|
|||
#/bin/bash
|
||||
# Dump all ES data
|
||||
# Make sure ES is available
|
||||
myES="http://127.0.0.1:64298/"
|
||||
myESSTATUS=$(curl -s -XGET ''$myES'_cluster/health' | jq '.' | grep -c green)
|
||||
if ! [ "$myESSTATUS" = "1" ]
|
||||
then
|
||||
echo "### Elasticsearch is not available, try starting via 'systemctl start elk'."
|
||||
exit
|
||||
else
|
||||
echo "### Elasticsearch is available, now continuing."
|
||||
echo
|
||||
fi
|
||||
|
||||
# Let's ensure normal operation on exit or if interrupted ...
|
||||
function fuCLEANUP {
|
||||
rm -rf tmp
|
||||
}
|
||||
trap fuCLEANUP EXIT
|
||||
|
||||
# Set vars
|
||||
myDATE=$(date +%Y%m%d%H%M)
|
||||
myINDICES=$(curl -s -XGET ''$myES'_cat/indices/' | grep logstash | awk '{ print $3 }' | sort | grep -v 1970)
|
||||
myES="http://127.0.0.1:64298/"
|
||||
myCOL1="[0;34m"
|
||||
myCOL0="[0;0m"
|
||||
|
||||
# Dumping all ES data
|
||||
echo $myCOL1"### The following indices will be dumped: "$myCOL0
|
||||
echo $myINDICES
|
||||
echo
|
||||
|
||||
mkdir tmp
|
||||
for i in $myINDICES;
|
||||
do
|
||||
echo $myCOL1"### Now dumping: "$i $myCOL0
|
||||
elasticdump --input=$myES$i --output="tmp/"$i --limit 7500
|
||||
echo $myCOL1"### Now compressing: tmp/$i" $myCOL0
|
||||
gzip -f "tmp/"$i
|
||||
done;
|
||||
|
||||
# Build tar archive
|
||||
echo $myCOL1"### Now building tar archive: es_dump_"$myDATE".tgz" $myCOL0
|
||||
tar cvf es_dump_$myDATE.tar tmp/*
|
||||
echo $myCOL1"### Done."$myCOL0
|
77
bin/export_kibana-objects.sh
Executable file
77
bin/export_kibana-objects.sh
Executable file
|
@ -0,0 +1,77 @@
|
|||
#!/bin/bash
|
||||
# Export all Kibana objects
|
||||
# Make sure ES is available
|
||||
myES="http://127.0.0.1:64298/"
|
||||
myESSTATUS=$(curl -s -XGET ''$myES'_cluster/health' | jq '.' | grep -c green)
|
||||
if ! [ "$myESSTATUS" = "1" ]
|
||||
then
|
||||
echo "### Elasticsearch is not available, try starting via 'systemctl start elk'."
|
||||
exit
|
||||
else
|
||||
echo "### Elasticsearch is available, now continuing."
|
||||
echo
|
||||
fi
|
||||
|
||||
# Set vars
|
||||
myDATE=$(date +%Y%m%d%H%M)
|
||||
myINDEXCOUNT=$(curl -s -XGET ''$myES'.kibana/index-pattern/logstash-*' | tr '\\' '\n' | grep "scripted" | wc -w)
|
||||
myDASHBOARDS=$(curl -s -XGET ''$myES'.kibana/dashboard/_search?filter_path=hits.hits._id&pretty&size=10000' | jq '.hits.hits[] | {_id}' | jq -r '._id')
|
||||
myVISUALIZATIONS=$(curl -s -XGET ''$myES'.kibana/visualization/_search?filter_path=hits.hits._id&pretty&size=10000' | jq '.hits.hits[] | {_id}' | jq -r '._id')
|
||||
mySEARCHES=$(curl -s -XGET ''$myES'.kibana/search/_search?filter_path=hits.hits._id&pretty&size=10000' | jq '.hits.hits[] | {_id}' | jq -r '._id')
|
||||
myCOL1="[0;34m"
|
||||
myCOL0="[0;0m"
|
||||
|
||||
# Let's ensure normal operation on exit or if interrupted ...
|
||||
function fuCLEANUP {
|
||||
rm -rf patterns/ dashboards/ visualizations/ searches/
|
||||
}
|
||||
trap fuCLEANUP EXIT
|
||||
|
||||
# Export index patterns
|
||||
mkdir -p patterns
|
||||
echo $myCOL1"### Now exporting"$myCOL0 $myINDEXCOUNT $myCOL1"index patterns." $myCOL0
|
||||
curl -s -XGET ''$myES'.kibana/index-pattern/logstash-*?' | jq '._source' > patterns/index-patterns.json
|
||||
echo
|
||||
|
||||
# Export dashboards
|
||||
mkdir -p dashboards
|
||||
echo $myCOL1"### Now exporting"$myCOL0 $(echo $myDASHBOARDS | wc -w) $myCOL1"dashboards." $myCOL0
|
||||
for i in $myDASHBOARDS;
|
||||
do
|
||||
echo $myCOL1"###### "$i $myCOL0
|
||||
curl -s -XGET ''$myES'.kibana/dashboard/'$i'' | jq '._source' > dashboards/$i.json
|
||||
done;
|
||||
echo
|
||||
|
||||
# Export visualizations
|
||||
mkdir -p visualizations
|
||||
echo $myCOL1"### Now exporting"$myCOL0 $(echo $myVISUALIZATIONS | wc -w) $myCOL1"visualizations." $myCOL0
|
||||
for i in $myVISUALIZATIONS;
|
||||
do
|
||||
echo $myCOL1"###### "$i $myCOL0
|
||||
curl -s -XGET ''$myES'.kibana/visualization/'$i'' | jq '._source' > visualizations/$i.json
|
||||
done;
|
||||
echo
|
||||
|
||||
# Export searches
|
||||
mkdir -p searches
|
||||
echo $myCOL1"### Now exporting"$myCOL0 $(echo $mySEARCHES | wc -w) $myCOL1"searches." $myCOL0
|
||||
for i in $mySEARCHES;
|
||||
do
|
||||
echo $myCOL1"###### "$i $myCOL0
|
||||
curl -s -XGET ''$myES'.kibana/search/'$i'' | jq '._source' > searches/$i.json
|
||||
done;
|
||||
echo
|
||||
|
||||
# Building tar archive
|
||||
echo $myCOL1"### Now building archive"$myCOL0 "kibana-objects_"$myDATE".tgz"
|
||||
tar cvfz kibana-objects_$myDATE.tgz patterns dashboards visualizations searches > /dev/null
|
||||
|
||||
# Stats
|
||||
echo
|
||||
echo $myCOL1"### Statistics"
|
||||
echo $myCOL1"###### Exported"$myCOL0 $myINDEXCOUNT $myCOL1"index patterns." $myCOL0
|
||||
echo $myCOL1"###### Exported"$myCOL0 $(echo $myDASHBOARDS | wc -w) $myCOL1"dashboards." $myCOL0
|
||||
echo $myCOL1"###### Exported"$myCOL0 $(echo $myVISUALIZATIONS | wc -w) $myCOL1"visualizations." $myCOL0
|
||||
echo $myCOL1"###### Exported"$myCOL0 $(echo $mySEARCHES | wc -w) $myCOL1"searches." $myCOL0
|
||||
echo
|
91
bin/import_kibana-objects.sh
Executable file
91
bin/import_kibana-objects.sh
Executable file
|
@ -0,0 +1,91 @@
|
|||
#!/bin/bash
|
||||
# Import Kibana objects
|
||||
# Make sure ES is available
|
||||
myES="http://127.0.0.1:64298/"
|
||||
myESSTATUS=$(curl -s -XGET ''$myES'_cluster/health' | jq '.' | grep -c green)
|
||||
if ! [ "$myESSTATUS" = "1" ]
|
||||
then
|
||||
echo "### Elasticsearch is not available, try starting via 'systemctl start elk'."
|
||||
exit
|
||||
else
|
||||
echo "### Elasticsearch is available, now continuing."
|
||||
echo
|
||||
fi
|
||||
|
||||
# Set vars
|
||||
myDUMP=$1
|
||||
myCOL1="[0;34m"
|
||||
myCOL0="[0;0m"
|
||||
|
||||
# Let's ensure normal operation on exit or if interrupted ...
|
||||
function fuCLEANUP {
|
||||
rm -rf patterns/ dashboards/ visualizations/ searches/
|
||||
}
|
||||
trap fuCLEANUP EXIT
|
||||
|
||||
# Check if parameter is given and file exists
|
||||
if [ "$myDUMP" = "" ];
|
||||
then
|
||||
echo $myCOL1"### Please provide a backup file name."$myCOL0
|
||||
echo $myCOL1"### restore-kibana-objects.sh <kibana-objects.tgz>"$myCOL0
|
||||
echo
|
||||
exit
|
||||
fi
|
||||
if ! [ -a $myDUMP ];
|
||||
then
|
||||
echo $myCOL1"### File not found."$myCOL0
|
||||
exit
|
||||
fi
|
||||
|
||||
# Unpack tar
|
||||
tar xvfz $myDUMP > /dev/null
|
||||
|
||||
# Restore index patterns
|
||||
myINDEXCOUNT=$(cat patterns/index-patterns.json | tr '\\' '\n' | grep "scripted" | wc -w)
|
||||
echo $myCOL1"### Now importing"$myCOL0 $myINDEXCOUNT $myCOL1"index patterns." $myCOL0
|
||||
curl -s -XDELETE ''$myES'.kibana/index-pattern/logstash-*' > /dev/null
|
||||
curl -s -XPUT ''$myES'.kibana/index-pattern/logstash-*' -T patterns/index-patterns.json > /dev/null
|
||||
echo
|
||||
|
||||
# Restore dashboards
|
||||
myDASHBOARDS=$(ls dashboards/*.json | cut -c 12- | rev | cut -c 6- | rev)
|
||||
echo $myCOL1"### Now importing "$myCOL0$(echo $myDASHBOARDS | wc -w)$myCOL1 "dashboards." $myCOL0
|
||||
for i in $myDASHBOARDS;
|
||||
do
|
||||
echo $myCOL1"###### "$i $myCOL0
|
||||
curl -s -XDELETE ''$myES'.kibana/dashboard/'$i'' > /dev/null
|
||||
curl -s -XPUT ''$myES'.kibana/dashboard/'$i'' -T dashboards/$i.json > /dev/null
|
||||
done;
|
||||
echo
|
||||
|
||||
# Restore visualizations
|
||||
myVISUALIZATIONS=$(ls visualizations/*.json | cut -c 16- | rev | cut -c 6- | rev)
|
||||
echo $myCOL1"### Now importing "$myCOL0$(echo $myVISUALIZATIONS | wc -w)$myCOL1 "visualizations." $myCOL0
|
||||
for i in $myVISUALIZATIONS;
|
||||
do
|
||||
echo $myCOL1"###### "$i $myCOL0
|
||||
curl -s -XDELETE ''$myES'.kibana/visualization/'$i'' > /dev/null
|
||||
curl -s -XPUT ''$myES'.kibana/visualization/'$i'' -T visualizations/$i.json > /dev/null
|
||||
done;
|
||||
echo
|
||||
|
||||
# Restore searches
|
||||
mySEARCHES=$(ls searches/*.json | cut -c 10- | rev | cut -c 6- | rev)
|
||||
echo $myCOL1"### Now importing "$myCOL0$(echo $mySEARCHES | wc -w)$myCOL1 "searches." $myCOL0
|
||||
for i in $mySEARCHES;
|
||||
do
|
||||
echo $myCOL1"###### "$i $myCOL0
|
||||
curl -s -XDELETE ''$myES'.kibana/search/'$i'' > /dev/null
|
||||
curl -s -XPUT ''$myES'.kibana/search/'$i'' -T searches/$i.json > /dev/null
|
||||
done;
|
||||
echo
|
||||
|
||||
# Stats
|
||||
echo
|
||||
echo $myCOL1"### Statistics"
|
||||
echo $myCOL1"###### Imported"$myCOL0 $myINDEXCOUNT $myCOL1"index patterns." $myCOL0
|
||||
echo $myCOL1"###### Imported"$myCOL0 $(echo $myDASHBOARDS | wc -w) $myCOL1"dashboards." $myCOL0
|
||||
echo $myCOL1"###### Imported"$myCOL0 $(echo $myVISUALIZATIONS | wc -w) $myCOL1"visualizations." $myCOL0
|
||||
echo $myCOL1"###### Imported"$myCOL0 $(echo $mySEARCHES | wc -w) $myCOL1"searches." $myCOL0
|
||||
echo
|
||||
|
88
bin/myip.sh
Executable file
88
bin/myip.sh
Executable file
|
@ -0,0 +1,88 @@
|
|||
#!/bin/bash
|
||||
|
||||
## Get my external IP
|
||||
|
||||
timeout=2 # seconds to wait for a reply before trying next server
|
||||
verbose=1 # prints which server was used to STDERR
|
||||
|
||||
dnslist=(
|
||||
"dig +short myip.opendns.com @resolver1.opendns.com"
|
||||
"dig +short myip.opendns.com @resolver2.opendns.com"
|
||||
"dig +short myip.opendns.com @resolver3.opendns.com"
|
||||
"dig +short myip.opendns.com @resolver4.opendns.com"
|
||||
"dig +short -4 -t a whoami.akamai.net @ns1-1.akamaitech.net"
|
||||
"dig +short whoami.akamai.net @ns1-1.akamaitech.net"
|
||||
)
|
||||
|
||||
httplist=(
|
||||
alma.ch/myip.cgi
|
||||
api.infoip.io/ip
|
||||
api.ipify.org
|
||||
bot.whatismyipaddress.com
|
||||
canhazip.com
|
||||
checkip.amazonaws.com
|
||||
eth0.me
|
||||
icanhazip.com
|
||||
ident.me
|
||||
ipecho.net/plain
|
||||
ipinfo.io/ip
|
||||
ipof.in/txt
|
||||
ip.tyk.nu
|
||||
l2.io/ip
|
||||
smart-ip.net/myip
|
||||
wgetip.com
|
||||
whatismyip.akamai.com
|
||||
)
|
||||
|
||||
# function to shuffle the global array "array"
|
||||
shuffle() {
|
||||
local i tmp size max rand
|
||||
size=${#array[*]}
|
||||
max=$(( 32768 / size * size ))
|
||||
for ((i=size-1; i>0; i--)); do
|
||||
while (( (rand=$RANDOM) >= max )); do :; done
|
||||
rand=$(( rand % (i+1) ))
|
||||
tmp=${array[i]} array[i]=${array[rand]} array[rand]=$tmp
|
||||
done
|
||||
}
|
||||
|
||||
# if we have dig and a list of dns methods, try that first
|
||||
if hash dig 2>/dev/null && [ ${#dnslist[*]} -gt 0 ]; then
|
||||
eval array=( \"\${dnslist[@]}\" )
|
||||
shuffle
|
||||
|
||||
for cmd in "${array[@]}"; do
|
||||
[ "$verbose" == 1 ] && echo Trying: $cmd 1>&2
|
||||
ip=$(timeout $timeout $cmd)
|
||||
if [ -n "$ip" ]; then
|
||||
echo $ip
|
||||
exit
|
||||
fi
|
||||
done
|
||||
fi
|
||||
|
||||
# if we haven't succeeded with DNS, try HTTP
|
||||
if [ ${#httplist[*]} == 0 ]; then
|
||||
echo "No hosts in httplist array!" >&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# use curl or wget, depending on which one we find
|
||||
curl_or_wget=$(if hash curl 2>/dev/null; then echo curl; elif hash wget 2>/dev/null; then echo "wget -qO-"; fi);
|
||||
|
||||
if [ -z "$curl_or_wget" ]; then
|
||||
echo "Neither curl nor wget found. Cannot use http method." >&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
eval array=( \"\${httplist[@]}\" )
|
||||
shuffle
|
||||
|
||||
for url in "${array[@]}"; do
|
||||
[ "$verbose" == 1 ] && echo Trying: $curl_or_wget -s "$url" 1>&2
|
||||
ip=$(timeout $timeout $curl_or_wget -s "$url")
|
||||
if [ -n "$ip" ]; then
|
||||
echo $ip
|
||||
exit
|
||||
fi
|
||||
done
|
61
bin/restore_es.sh
Executable file
61
bin/restore_es.sh
Executable file
|
@ -0,0 +1,61 @@
|
|||
#/bin/bash
|
||||
# Restore folder based ES backup
|
||||
# Make sure ES is available
|
||||
myES="http://127.0.0.1:64298/"
|
||||
myESSTATUS=$(curl -s -XGET ''$myES'_cluster/health' | jq '.' | grep -c green)
|
||||
if ! [ "$myESSTATUS" = "1" ]
|
||||
then
|
||||
echo "### Elasticsearch is not available, try starting via 'systemctl start elk'."
|
||||
exit
|
||||
else
|
||||
echo "### Elasticsearch is available, now continuing."
|
||||
fi
|
||||
|
||||
# Let's ensure normal operation on exit or if interrupted ...
|
||||
function fuCLEANUP {
|
||||
rm -rf tmp
|
||||
}
|
||||
trap fuCLEANUP EXIT
|
||||
|
||||
# Set vars
|
||||
myDUMP=$1
|
||||
myCOL1="[0;34m"
|
||||
myCOL0="[0;0m"
|
||||
|
||||
# Check if parameter is given and file exists
|
||||
if [ "$myDUMP" = "" ];
|
||||
then
|
||||
echo $myCOL1"### Please provide a backup file name."$myCOL0
|
||||
echo $myCOL1"### restore-elk.sh <es_dump.tar>"$myCOL0
|
||||
echo
|
||||
exit
|
||||
fi
|
||||
if ! [ -a $myDUMP ];
|
||||
then
|
||||
echo $myCOL1"### File not found."$myCOL0
|
||||
exit
|
||||
fi
|
||||
|
||||
# Unpack tar archive
|
||||
echo $myCOL1"### Now unpacking tar archive: "$myDUMP $myCOL0
|
||||
tar xvf $myDUMP
|
||||
|
||||
# Build indices list
|
||||
myINDICES=$(ls tmp/logstash*.gz | cut -c 5- | rev | cut -c 4- | rev)
|
||||
echo $myCOL1"### The following indices will be restored: "$myCOL0
|
||||
echo $myINDICES
|
||||
echo
|
||||
|
||||
# Restore indices
|
||||
for i in $myINDICES;
|
||||
do
|
||||
# Delete index if it already exists
|
||||
curl -s -XDELETE $myES$i > /dev/null
|
||||
echo $myCOL1"### Now uncompressing: tmp/$i.gz" $myCOL0
|
||||
gunzip -f tmp/$i.gz
|
||||
# Restore index to ES
|
||||
echo $myCOL1"### Now restoring: "$i $myCOL0
|
||||
elasticdump --input=tmp/$i --output=$myES$i --limit 7500
|
||||
rm tmp/$i
|
||||
done;
|
||||
echo $myCOL1"### Done."$myCOL0
|
24
bin/updateip.sh
Executable file
24
bin/updateip.sh
Executable file
|
@ -0,0 +1,24 @@
|
|||
#!/bin/bash
|
||||
# Let's add the first local ip to the /etc/issue and external ip to ews.ip file
|
||||
# If the external IP cannot be detected, the internal IP will be inherited.
|
||||
source /etc/environment
|
||||
myLOCALIP=$(hostname -I | awk '{ print $1 }')
|
||||
myEXTIP=$(/opt/tpot/bin/myip.sh)
|
||||
if [ "$myEXTIP" = "" ];
|
||||
then
|
||||
myEXTIP=$myLOCALIP
|
||||
fi
|
||||
sed -i "s#IP:.*#IP: $myLOCALIP ($myEXTIP)[0m#" /etc/issue
|
||||
sed -i "s#SSH:.*#SSH: ssh -l tsec -p 64295 $myLOCALIP[0m#" /etc/issue
|
||||
sed -i "s#WEB:.*#WEB: https://$myLOCALIP:64297[0m#" /etc/issue
|
||||
tee /data/ews/conf/ews.ip << EOF
|
||||
[MAIN]
|
||||
ip = $myEXTIP
|
||||
EOF
|
||||
tee /opt/tpot/etc/compose/elk_environment << EOF
|
||||
MY_EXTIP=$myEXTIP
|
||||
MY_INTIP=$myLOCALIP
|
||||
MY_HOSTNAME=$HOSTNAME
|
||||
EOF
|
||||
chown tpot:tpot /data/ews/conf/ews.ip
|
||||
chmod 760 /data/ews/conf/ews.ip
|
313
etc/compose/all.yml
Normal file
313
etc/compose/all.yml
Normal file
|
@ -0,0 +1,313 @@
|
|||
# T-Pot (Everything)
|
||||
# For docker-compose ...
|
||||
version: '2.1'
|
||||
|
||||
networks:
|
||||
conpot_local:
|
||||
cowrie_local:
|
||||
dionaea_local:
|
||||
elasticpot_local:
|
||||
emobility_local:
|
||||
ewsposter_local:
|
||||
glastopf_local:
|
||||
mailoney_local:
|
||||
rdpy_local:
|
||||
spiderfoot_local:
|
||||
ui-for-docker_local:
|
||||
vnclowpot_local:
|
||||
|
||||
services:
|
||||
|
||||
# Conpot service
|
||||
conpot:
|
||||
container_name: conpot
|
||||
restart: always
|
||||
networks:
|
||||
- conpot_local
|
||||
ports:
|
||||
- "1025:1025"
|
||||
- "50100:50100"
|
||||
image: "dtagdevsec/conpot:1710"
|
||||
volumes:
|
||||
- /data/conpot/log:/var/log/conpot
|
||||
|
||||
# Cowrie service
|
||||
cowrie:
|
||||
container_name: cowrie
|
||||
restart: always
|
||||
networks:
|
||||
- cowrie_local
|
||||
cap_add:
|
||||
- NET_BIND_SERVICE
|
||||
ports:
|
||||
- "22:2222"
|
||||
- "23:2223"
|
||||
image: "dtagdevsec/cowrie:1710"
|
||||
volumes:
|
||||
- /data/cowrie/downloads:/home/cowrie/cowrie/dl
|
||||
- /data/cowrie/keys:/home/cowrie/cowrie/etc
|
||||
- /data/cowrie/log:/home/cowrie/cowrie/log
|
||||
- /data/cowrie/log/tty:/home/cowrie/cowrie/log/tty
|
||||
|
||||
# Dionaea service
|
||||
dionaea:
|
||||
container_name: dionaea
|
||||
stdin_open: true
|
||||
restart: always
|
||||
networks:
|
||||
- dionaea_local
|
||||
cap_add:
|
||||
- NET_BIND_SERVICE
|
||||
ports:
|
||||
- "20:20"
|
||||
- "21:21"
|
||||
- "42:42"
|
||||
- "69:69/udp"
|
||||
- "8081:80"
|
||||
- "135:135"
|
||||
- "443:443"
|
||||
- "445:445"
|
||||
- "1433:1433"
|
||||
- "1723:1723"
|
||||
- "1883:1883"
|
||||
- "1900:1900/udp"
|
||||
- "3306:3306"
|
||||
- "5060:5060"
|
||||
- "5060:5060/udp"
|
||||
- "5061:5061"
|
||||
- "27017:27017"
|
||||
image: "dtagdevsec/dionaea:1710"
|
||||
volumes:
|
||||
- /data/dionaea/roots/ftp:/opt/dionaea/var/dionaea/roots/ftp
|
||||
- /data/dionaea/roots/tftp:/opt/dionaea/var/dionaea/roots/tftp
|
||||
- /data/dionaea/roots/www:/opt/dionaea/var/dionaea/roots/www
|
||||
- /data/dionaea/roots/upnp:/opt/dionaea/var/dionaea/roots/upnp
|
||||
- /data/dionaea:/opt/dionaea/var/dionaea
|
||||
- /data/dionaea/binaries:/opt/dionaea/var/dionaea/binaries
|
||||
- /data/dionaea/log:/opt/dionaea/var/log
|
||||
- /data/dionaea/rtp:/opt/dionaea/var/dionaea/rtp
|
||||
|
||||
# Elasticpot service
|
||||
elasticpot:
|
||||
container_name: elasticpot
|
||||
restart: always
|
||||
networks:
|
||||
- elasticpot_local
|
||||
ports:
|
||||
- "9200:9200"
|
||||
image: "dtagdevsec/elasticpot:1710"
|
||||
volumes:
|
||||
- /data/elasticpot/log:/opt/ElasticpotPY/log
|
||||
|
||||
# ELK services
|
||||
## Elasticsearch service
|
||||
elasticsearch:
|
||||
container_name: elasticsearch
|
||||
restart: always
|
||||
environment:
|
||||
- bootstrap.memory_lock=true
|
||||
# - "ES_JAVA_OPTS=-Xms1g -Xmx1g"
|
||||
cap_add:
|
||||
- IPC_LOCK
|
||||
ulimits:
|
||||
memlock:
|
||||
soft: -1
|
||||
hard: -1
|
||||
nofile:
|
||||
soft: 65536
|
||||
hard: 65536
|
||||
# mem_limit: 2g
|
||||
ports:
|
||||
- "127.0.0.1:64298:9200"
|
||||
image: "dtagdevsec/elasticsearch:1710"
|
||||
volumes:
|
||||
- /data:/data
|
||||
|
||||
## Kibana service
|
||||
kibana:
|
||||
container_name: kibana
|
||||
restart: always
|
||||
depends_on:
|
||||
elasticsearch:
|
||||
condition: service_healthy
|
||||
ports:
|
||||
- "127.0.0.1:64296:5601"
|
||||
image: "dtagdevsec/kibana:1710"
|
||||
|
||||
## Logstash service
|
||||
logstash:
|
||||
container_name: logstash
|
||||
restart: always
|
||||
depends_on:
|
||||
elasticsearch:
|
||||
condition: service_healthy
|
||||
env_file:
|
||||
- /opt/tpot/etc/compose/elk_environment
|
||||
image: "dtagdevsec/logstash:1710"
|
||||
volumes:
|
||||
- /data:/data
|
||||
- /var/log:/data/host/log
|
||||
|
||||
## Elasticsearch-head service
|
||||
head:
|
||||
container_name: head
|
||||
restart: always
|
||||
depends_on:
|
||||
elasticsearch:
|
||||
condition: service_healthy
|
||||
ports:
|
||||
- "127.0.0.1:64302:9100"
|
||||
image: "dtagdevsec/head:1710"
|
||||
|
||||
# Emobility service
|
||||
emobility:
|
||||
container_name: emobility
|
||||
restart: always
|
||||
networks:
|
||||
- emobility_local
|
||||
cap_add:
|
||||
- NET_ADMIN
|
||||
ports:
|
||||
- "8080:8080"
|
||||
image: "dtagdevsec/emobility:1710"
|
||||
volumes:
|
||||
- /data/emobility:/data/eMobility
|
||||
- /data/ews:/data/ews
|
||||
|
||||
# Ewsposter service
|
||||
ewsposter:
|
||||
container_name: ewsposter
|
||||
restart: always
|
||||
networks:
|
||||
- ewsposter_local
|
||||
image: "dtagdevsec/ewsposter:1710"
|
||||
volumes:
|
||||
- /data:/data
|
||||
- /data/ews/conf/ews.ip:/opt/ewsposter/ews.ip
|
||||
|
||||
# Glastopf service
|
||||
glastopf:
|
||||
container_name: glastopf
|
||||
restart: always
|
||||
networks:
|
||||
- glastopf_local
|
||||
ports:
|
||||
- "80:80"
|
||||
image: "dtagdevsec/glastopf:1710"
|
||||
volumes:
|
||||
- /data/glastopf/db:/opt/glastopf/db
|
||||
- /data/glastopf/log:/opt/glastopf/log
|
||||
|
||||
# Honeytrap service
|
||||
honeytrap:
|
||||
container_name: honeytrap
|
||||
restart: always
|
||||
network_mode: "host"
|
||||
cap_add:
|
||||
- NET_ADMIN
|
||||
image: "dtagdevsec/honeytrap:1710"
|
||||
volumes:
|
||||
- /data/honeytrap/attacks:/opt/honeytrap/var/attacks
|
||||
- /data/honeytrap/downloads:/opt/honeytrap/var/downloads
|
||||
- /data/honeytrap/log:/opt/honeytrap/var/log
|
||||
|
||||
# Mailoney service
|
||||
mailoney:
|
||||
container_name: mailoney
|
||||
restart: always
|
||||
networks:
|
||||
- mailoney_local
|
||||
ports:
|
||||
- "25:2525"
|
||||
image: "dtagdevsec/mailoney:1710"
|
||||
volumes:
|
||||
- /data/mailoney/log:/opt/mailoney/logs
|
||||
|
||||
# Netdata service
|
||||
netdata:
|
||||
container_name: netdata
|
||||
restart: always
|
||||
network_mode: "host"
|
||||
depends_on:
|
||||
elasticsearch:
|
||||
condition: service_healthy
|
||||
cap_add:
|
||||
- SYS_PTRACE
|
||||
security_opt:
|
||||
- apparmor=unconfined
|
||||
image: "dtagdevsec/netdata:1710"
|
||||
volumes:
|
||||
- /proc:/host/proc:ro
|
||||
- /sys:/host/sys:ro
|
||||
- /var/run/docker.sock:/var/run/docker.sock
|
||||
|
||||
# Rdpy service
|
||||
rdpy:
|
||||
container_name: rdpy
|
||||
restart: always
|
||||
networks:
|
||||
- rdpy_local
|
||||
ports:
|
||||
- "3389:3389"
|
||||
image: "dtagdevsec/rdpy:1710"
|
||||
volumes:
|
||||
- /data/rdpy/log:/var/log/rdpy
|
||||
|
||||
# Spiderfoot service
|
||||
spiderfoot:
|
||||
container_name: spiderfoot
|
||||
restart: always
|
||||
networks:
|
||||
- spiderfoot_local
|
||||
ports:
|
||||
- "127.0.0.1:64303:8080"
|
||||
image: "dtagdevsec/spiderfoot:1710"
|
||||
volumes:
|
||||
- /data/spiderfoot/spiderfoot.db:/home/spiderfoot/spiderfoot.db
|
||||
|
||||
# Ui-for-docker service
|
||||
ui-for-docker:
|
||||
container_name: ui-for-docker
|
||||
command: -H unix:///var/run/docker.sock --no-auth
|
||||
restart: always
|
||||
networks:
|
||||
- ui-for-docker_local
|
||||
ports:
|
||||
- "127.0.0.1:64299:9000"
|
||||
image: "dtagdevsec/ui-for-docker:1710"
|
||||
volumes:
|
||||
- /var/run/docker.sock:/var/run/docker.sock
|
||||
|
||||
# Suricata service
|
||||
suricata:
|
||||
container_name: suricata
|
||||
restart: always
|
||||
network_mode: "host"
|
||||
cap_add:
|
||||
- NET_ADMIN
|
||||
- SYS_NICE
|
||||
- NET_RAW
|
||||
image: "dtagdevsec/suricata:1710"
|
||||
volumes:
|
||||
- /data/suricata/log:/var/log/suricata
|
||||
|
||||
# P0f service
|
||||
p0f:
|
||||
container_name: p0f
|
||||
restart: always
|
||||
network_mode: "host"
|
||||
image: "dtagdevsec/p0f:1710"
|
||||
volumes:
|
||||
- /data/p0f/log:/var/log/p0f
|
||||
|
||||
# Vnclowpot service
|
||||
vnclowpot:
|
||||
container_name: vnclowpot
|
||||
restart: always
|
||||
networks:
|
||||
- vnclowpot_local
|
||||
ports:
|
||||
- "5900:5900"
|
||||
image: "dtagdevsec/vnclowpot:1710"
|
||||
volumes:
|
||||
- /data/vnclowpot/log:/var/log/vnclowpot
|
156
etc/compose/hp.yml
Normal file
156
etc/compose/hp.yml
Normal file
|
@ -0,0 +1,156 @@
|
|||
# T-Pot (HP)
|
||||
# For docker-compose ...
|
||||
version: '2.1'
|
||||
|
||||
networks:
|
||||
cowrie_local:
|
||||
dionaea_local:
|
||||
elasticpot_local:
|
||||
ewsposter_local:
|
||||
glastopf_local:
|
||||
mailoney_local:
|
||||
rdpy_local:
|
||||
vnclowpot_local:
|
||||
|
||||
services:
|
||||
|
||||
# Cowrie service
|
||||
cowrie:
|
||||
container_name: cowrie
|
||||
restart: always
|
||||
networks:
|
||||
- cowrie_local
|
||||
cap_add:
|
||||
- NET_BIND_SERVICE
|
||||
ports:
|
||||
- "22:2222"
|
||||
- "23:2223"
|
||||
image: "dtagdevsec/cowrie:1710"
|
||||
volumes:
|
||||
- /data/cowrie/downloads:/home/cowrie/cowrie/dl
|
||||
- /data/cowrie/keys:/home/cowrie/cowrie/etc
|
||||
- /data/cowrie/log:/home/cowrie/cowrie/log
|
||||
- /data/cowrie/log/tty:/home/cowrie/cowrie/log/tty
|
||||
|
||||
# Dionaea service
|
||||
dionaea:
|
||||
container_name: dionaea
|
||||
stdin_open: true
|
||||
restart: always
|
||||
networks:
|
||||
- dionaea_local
|
||||
cap_add:
|
||||
- NET_BIND_SERVICE
|
||||
ports:
|
||||
- "20:20"
|
||||
- "21:21"
|
||||
- "42:42"
|
||||
- "69:69/udp"
|
||||
- "8081:80"
|
||||
- "135:135"
|
||||
- "443:443"
|
||||
- "445:445"
|
||||
- "1433:1433"
|
||||
- "1723:1723"
|
||||
- "1883:1883"
|
||||
- "1900:1900/udp"
|
||||
- "3306:3306"
|
||||
- "5060:5060"
|
||||
- "5060:5060/udp"
|
||||
- "5061:5061"
|
||||
- "27017:27017"
|
||||
image: "dtagdevsec/dionaea:1710"
|
||||
volumes:
|
||||
- /data/dionaea/roots/ftp:/opt/dionaea/var/dionaea/roots/ftp
|
||||
- /data/dionaea/roots/tftp:/opt/dionaea/var/dionaea/roots/tftp
|
||||
- /data/dionaea/roots/www:/opt/dionaea/var/dionaea/roots/www
|
||||
- /data/dionaea/roots/upnp:/opt/dionaea/var/dionaea/roots/upnp
|
||||
- /data/dionaea:/opt/dionaea/var/dionaea
|
||||
- /data/dionaea/binaries:/opt/dionaea/var/dionaea/binaries
|
||||
- /data/dionaea/log:/opt/dionaea/var/log
|
||||
- /data/dionaea/rtp:/opt/dionaea/var/dionaea/rtp
|
||||
|
||||
# Elasticpot service
|
||||
elasticpot:
|
||||
container_name: elasticpot
|
||||
restart: always
|
||||
networks:
|
||||
- elasticpot_local
|
||||
ports:
|
||||
- "9200:9200"
|
||||
image: "dtagdevsec/elasticpot:1710"
|
||||
volumes:
|
||||
- /data/elasticpot/log:/opt/ElasticpotPY/log
|
||||
|
||||
# Ewsposter service
|
||||
ewsposter:
|
||||
container_name: ewsposter
|
||||
restart: always
|
||||
networks:
|
||||
- ewsposter_local
|
||||
image: "dtagdevsec/ewsposter:1710"
|
||||
volumes:
|
||||
- /data:/data
|
||||
- /data/ews/conf/ews.ip:/opt/ewsposter/ews.ip
|
||||
|
||||
# Glastopf service
|
||||
glastopf:
|
||||
container_name: glastopf
|
||||
restart: always
|
||||
networks:
|
||||
- glastopf_local
|
||||
ports:
|
||||
- "80:80"
|
||||
image: "dtagdevsec/glastopf:1710"
|
||||
volumes:
|
||||
- /data/glastopf/db:/opt/glastopf/db
|
||||
- /data/glastopf/log:/opt/glastopf/log
|
||||
|
||||
# Honeytrap service
|
||||
honeytrap:
|
||||
container_name: honeytrap
|
||||
restart: always
|
||||
network_mode: "host"
|
||||
cap_add:
|
||||
- NET_ADMIN
|
||||
image: "dtagdevsec/honeytrap:1710"
|
||||
volumes:
|
||||
- /data/honeytrap/attacks:/opt/honeytrap/var/attacks
|
||||
- /data/honeytrap/downloads:/opt/honeytrap/var/downloads
|
||||
- /data/honeytrap/log:/opt/honeytrap/var/log
|
||||
|
||||
# Mailoney service
|
||||
mailoney:
|
||||
container_name: mailoney
|
||||
restart: always
|
||||
networks:
|
||||
- mailoney_local
|
||||
ports:
|
||||
- "25:2525"
|
||||
image: "dtagdevsec/mailoney:1710"
|
||||
volumes:
|
||||
- /data/mailoney/log:/opt/mailoney/logs
|
||||
|
||||
# Rdpy service
|
||||
rdpy:
|
||||
container_name: rdpy
|
||||
restart: always
|
||||
networks:
|
||||
- rdpy_local
|
||||
ports:
|
||||
- "3389:3389"
|
||||
image: "dtagdevsec/rdpy:1710"
|
||||
volumes:
|
||||
- /data/rdpy/log:/var/log/rdpy
|
||||
|
||||
# Vnclowpot service
|
||||
vnclowpot:
|
||||
container_name: vnclowpot
|
||||
restart: always
|
||||
networks:
|
||||
- vnclowpot_local
|
||||
ports:
|
||||
- "5900:5900"
|
||||
image: "dtagdevsec/vnclowpot:1710"
|
||||
volumes:
|
||||
- /data/vnclowpot/log:/var/log/vnclowpot
|
176
etc/compose/industrial.yml
Normal file
176
etc/compose/industrial.yml
Normal file
|
@ -0,0 +1,176 @@
|
|||
# T-Pot (Industrial)
|
||||
# For docker-compose ...
|
||||
version: '2.1'
|
||||
|
||||
networks:
|
||||
conpot_local:
|
||||
emobility_local:
|
||||
ewsposter_local:
|
||||
spiderfoot_local:
|
||||
ui-for-docker_local:
|
||||
|
||||
services:
|
||||
|
||||
# Conpot service
|
||||
conpot:
|
||||
container_name: conpot
|
||||
restart: always
|
||||
networks:
|
||||
- conpot_local
|
||||
ports:
|
||||
- "1025:1025"
|
||||
- "50100:50100"
|
||||
image: "dtagdevsec/conpot:1710"
|
||||
volumes:
|
||||
- /data/conpot/log:/var/log/conpot
|
||||
|
||||
# ELK services
|
||||
## Elasticsearch service
|
||||
elasticsearch:
|
||||
container_name: elasticsearch
|
||||
restart: always
|
||||
environment:
|
||||
- bootstrap.memory_lock=true
|
||||
# - "ES_JAVA_OPTS=-Xms1g -Xmx1g"
|
||||
cap_add:
|
||||
- IPC_LOCK
|
||||
ulimits:
|
||||
memlock:
|
||||
soft: -1
|
||||
hard: -1
|
||||
nofile:
|
||||
soft: 65536
|
||||
hard: 65536
|
||||
# mem_limit: 2g
|
||||
ports:
|
||||
- "127.0.0.1:64298:9200"
|
||||
image: "dtagdevsec/elasticsearch:1710"
|
||||
volumes:
|
||||
- /data:/data
|
||||
|
||||
## Kibana service
|
||||
kibana:
|
||||
container_name: kibana
|
||||
restart: always
|
||||
depends_on:
|
||||
elasticsearch:
|
||||
condition: service_healthy
|
||||
ports:
|
||||
- "127.0.0.1:64296:5601"
|
||||
image: "dtagdevsec/kibana:1710"
|
||||
|
||||
## Logstash service
|
||||
logstash:
|
||||
container_name: logstash
|
||||
restart: always
|
||||
depends_on:
|
||||
elasticsearch:
|
||||
condition: service_healthy
|
||||
env_file:
|
||||
- /opt/tpot/etc/compose/elk_environment
|
||||
image: "dtagdevsec/logstash:1710"
|
||||
volumes:
|
||||
- /data:/data
|
||||
- /var/log:/data/host/log
|
||||
|
||||
## Elasticsearch-head service
|
||||
head:
|
||||
container_name: head
|
||||
restart: always
|
||||
depends_on:
|
||||
elasticsearch:
|
||||
condition: service_healthy
|
||||
ports:
|
||||
- "127.0.0.1:64302:9100"
|
||||
image: "dtagdevsec/head:1710"
|
||||
|
||||
# Emobility service
|
||||
emobility:
|
||||
container_name: emobility
|
||||
restart: always
|
||||
networks:
|
||||
- emobility_local
|
||||
cap_add:
|
||||
- NET_ADMIN
|
||||
ports:
|
||||
- "8080:8080"
|
||||
image: "dtagdevsec/emobility:1710"
|
||||
volumes:
|
||||
- /data/emobility:/data/eMobility
|
||||
- /data/ews:/data/ews
|
||||
|
||||
# Ewsposter service
|
||||
ewsposter:
|
||||
container_name: ewsposter
|
||||
restart: always
|
||||
networks:
|
||||
- ewsposter_local
|
||||
image: "dtagdevsec/ewsposter:1710"
|
||||
volumes:
|
||||
- /data:/data
|
||||
- /data/ews/conf/ews.ip:/opt/ewsposter/ews.ip
|
||||
|
||||
# Netdata service
|
||||
netdata:
|
||||
container_name: netdata
|
||||
restart: always
|
||||
network_mode: "host"
|
||||
depends_on:
|
||||
elasticsearch:
|
||||
condition: service_healthy
|
||||
cap_add:
|
||||
- SYS_PTRACE
|
||||
security_opt:
|
||||
- apparmor=unconfined
|
||||
image: "dtagdevsec/netdata:1710"
|
||||
volumes:
|
||||
- /proc:/host/proc:ro
|
||||
- /sys:/host/sys:ro
|
||||
- /var/run/docker.sock:/var/run/docker.sock
|
||||
|
||||
# Spiderfoot service
|
||||
spiderfoot:
|
||||
container_name: spiderfoot
|
||||
restart: always
|
||||
networks:
|
||||
- spiderfoot_local
|
||||
ports:
|
||||
- "127.0.0.1:64303:8080"
|
||||
image: "dtagdevsec/spiderfoot:1710"
|
||||
volumes:
|
||||
- /data/spiderfoot/spiderfoot.db:/home/spiderfoot/spiderfoot.db
|
||||
|
||||
# Ui-for-docker service
|
||||
ui-for-docker:
|
||||
container_name: ui-for-docker
|
||||
command: -H unix:///var/run/docker.sock --no-auth
|
||||
restart: always
|
||||
networks:
|
||||
- ui-for-docker_local
|
||||
ports:
|
||||
- "127.0.0.1:64299:9000"
|
||||
image: "dtagdevsec/ui-for-docker:1710"
|
||||
volumes:
|
||||
- /var/run/docker.sock:/var/run/docker.sock
|
||||
|
||||
# Suricata service
|
||||
suricata:
|
||||
container_name: suricata
|
||||
restart: always
|
||||
network_mode: "host"
|
||||
cap_add:
|
||||
- NET_ADMIN
|
||||
- SYS_NICE
|
||||
- NET_RAW
|
||||
image: "dtagdevsec/suricata:1710"
|
||||
volumes:
|
||||
- /data/suricata/log:/var/log/suricata
|
||||
|
||||
# P0f service
|
||||
p0f:
|
||||
container_name: p0f
|
||||
restart: always
|
||||
network_mode: "host"
|
||||
image: "dtagdevsec/p0f:1710"
|
||||
volumes:
|
||||
- /data/p0f/log:/var/log/p0f
|
283
etc/compose/tpot.yml
Normal file
283
etc/compose/tpot.yml
Normal file
|
@ -0,0 +1,283 @@
|
|||
# T-Pot (Standard)
|
||||
# For docker-compose ...
|
||||
version: '2.1'
|
||||
|
||||
networks:
|
||||
cowrie_local:
|
||||
dionaea_local:
|
||||
elasticpot_local:
|
||||
ewsposter_local:
|
||||
glastopf_local:
|
||||
mailoney_local:
|
||||
rdpy_local:
|
||||
spiderfoot_local:
|
||||
ui-for-docker_local:
|
||||
vnclowpot_local:
|
||||
|
||||
services:
|
||||
|
||||
# Cowrie service
|
||||
cowrie:
|
||||
container_name: cowrie
|
||||
restart: always
|
||||
networks:
|
||||
- cowrie_local
|
||||
cap_add:
|
||||
- NET_BIND_SERVICE
|
||||
ports:
|
||||
- "22:2222"
|
||||
- "23:2223"
|
||||
image: "dtagdevsec/cowrie:1710"
|
||||
volumes:
|
||||
- /data/cowrie/downloads:/home/cowrie/cowrie/dl
|
||||
- /data/cowrie/keys:/home/cowrie/cowrie/etc
|
||||
- /data/cowrie/log:/home/cowrie/cowrie/log
|
||||
- /data/cowrie/log/tty:/home/cowrie/cowrie/log/tty
|
||||
|
||||
# Dionaea service
|
||||
dionaea:
|
||||
container_name: dionaea
|
||||
stdin_open: true
|
||||
restart: always
|
||||
networks:
|
||||
- dionaea_local
|
||||
cap_add:
|
||||
- NET_BIND_SERVICE
|
||||
ports:
|
||||
- "20:20"
|
||||
- "21:21"
|
||||
- "42:42"
|
||||
- "69:69/udp"
|
||||
- "8081:80"
|
||||
- "135:135"
|
||||
- "443:443"
|
||||
- "445:445"
|
||||
- "1433:1433"
|
||||
- "1723:1723"
|
||||
- "1883:1883"
|
||||
- "1900:1900/udp"
|
||||
- "3306:3306"
|
||||
- "5060:5060"
|
||||
- "5060:5060/udp"
|
||||
- "5061:5061"
|
||||
- "27017:27017"
|
||||
image: "dtagdevsec/dionaea:1710"
|
||||
volumes:
|
||||
- /data/dionaea/roots/ftp:/opt/dionaea/var/dionaea/roots/ftp
|
||||
- /data/dionaea/roots/tftp:/opt/dionaea/var/dionaea/roots/tftp
|
||||
- /data/dionaea/roots/www:/opt/dionaea/var/dionaea/roots/www
|
||||
- /data/dionaea/roots/upnp:/opt/dionaea/var/dionaea/roots/upnp
|
||||
- /data/dionaea:/opt/dionaea/var/dionaea
|
||||
- /data/dionaea/binaries:/opt/dionaea/var/dionaea/binaries
|
||||
- /data/dionaea/log:/opt/dionaea/var/log
|
||||
- /data/dionaea/rtp:/opt/dionaea/var/dionaea/rtp
|
||||
|
||||
# Elasticpot service
|
||||
elasticpot:
|
||||
container_name: elasticpot
|
||||
restart: always
|
||||
networks:
|
||||
- elasticpot_local
|
||||
ports:
|
||||
- "9200:9200"
|
||||
image: "dtagdevsec/elasticpot:1710"
|
||||
volumes:
|
||||
- /data/elasticpot/log:/opt/ElasticpotPY/log
|
||||
|
||||
# ELK services
|
||||
## Elasticsearch service
|
||||
elasticsearch:
|
||||
container_name: elasticsearch
|
||||
restart: always
|
||||
environment:
|
||||
- bootstrap.memory_lock=true
|
||||
- "ES_JAVA_OPTS=-Xms512m -Xmx512m"
|
||||
cap_add:
|
||||
- IPC_LOCK
|
||||
ulimits:
|
||||
memlock:
|
||||
soft: -1
|
||||
hard: -1
|
||||
nofile:
|
||||
soft: 65536
|
||||
hard: 65536
|
||||
# mem_limit: 2g
|
||||
ports:
|
||||
- "127.0.0.1:64298:9200"
|
||||
image: "dtagdevsec/elasticsearch:1710"
|
||||
volumes:
|
||||
- /data:/data
|
||||
|
||||
## Kibana service
|
||||
kibana:
|
||||
container_name: kibana
|
||||
restart: always
|
||||
depends_on:
|
||||
elasticsearch:
|
||||
condition: service_healthy
|
||||
ports:
|
||||
- "127.0.0.1:64296:5601"
|
||||
image: "dtagdevsec/kibana:1710"
|
||||
|
||||
## Logstash service
|
||||
logstash:
|
||||
container_name: logstash
|
||||
restart: always
|
||||
depends_on:
|
||||
elasticsearch:
|
||||
condition: service_healthy
|
||||
env_file:
|
||||
- /opt/tpot/etc/compose/elk_environment
|
||||
image: "dtagdevsec/logstash:1710"
|
||||
volumes:
|
||||
- /data:/data
|
||||
- /var/log:/data/host/log
|
||||
|
||||
## Elasticsearch-head service
|
||||
head:
|
||||
container_name: head
|
||||
restart: always
|
||||
depends_on:
|
||||
elasticsearch:
|
||||
condition: service_healthy
|
||||
ports:
|
||||
- "127.0.0.1:64302:9100"
|
||||
image: "dtagdevsec/head:1710"
|
||||
|
||||
# Ewsposter service
|
||||
ewsposter:
|
||||
container_name: ewsposter
|
||||
restart: always
|
||||
networks:
|
||||
- ewsposter_local
|
||||
image: "dtagdevsec/ewsposter:1710"
|
||||
volumes:
|
||||
- /data:/data
|
||||
- /data/ews/conf/ews.ip:/opt/ewsposter/ews.ip
|
||||
|
||||
# Glastopf service
|
||||
glastopf:
|
||||
container_name: glastopf
|
||||
restart: always
|
||||
networks:
|
||||
- glastopf_local
|
||||
ports:
|
||||
- "80:80"
|
||||
image: "dtagdevsec/glastopf:1710"
|
||||
volumes:
|
||||
- /data/glastopf/db:/opt/glastopf/db
|
||||
- /data/glastopf/log:/opt/glastopf/log
|
||||
|
||||
# Honeytrap service
|
||||
honeytrap:
|
||||
container_name: honeytrap
|
||||
restart: always
|
||||
network_mode: "host"
|
||||
cap_add:
|
||||
- NET_ADMIN
|
||||
image: "dtagdevsec/honeytrap:1710"
|
||||
volumes:
|
||||
- /data/honeytrap/attacks:/opt/honeytrap/var/attacks
|
||||
- /data/honeytrap/downloads:/opt/honeytrap/var/downloads
|
||||
- /data/honeytrap/log:/opt/honeytrap/var/log
|
||||
|
||||
# Mailoney service
|
||||
mailoney:
|
||||
container_name: mailoney
|
||||
restart: always
|
||||
networks:
|
||||
- mailoney_local
|
||||
ports:
|
||||
- "25:2525"
|
||||
image: "dtagdevsec/mailoney:1710"
|
||||
volumes:
|
||||
- /data/mailoney/log:/opt/mailoney/logs
|
||||
|
||||
# Netdata service
|
||||
netdata:
|
||||
container_name: netdata
|
||||
restart: always
|
||||
network_mode: "host"
|
||||
depends_on:
|
||||
elasticsearch:
|
||||
condition: service_healthy
|
||||
cap_add:
|
||||
- SYS_PTRACE
|
||||
security_opt:
|
||||
- apparmor=unconfined
|
||||
image: "dtagdevsec/netdata:1710"
|
||||
volumes:
|
||||
- /proc:/host/proc:ro
|
||||
- /sys:/host/sys:ro
|
||||
- /var/run/docker.sock:/var/run/docker.sock
|
||||
|
||||
# Rdpy service
|
||||
rdpy:
|
||||
container_name: rdpy
|
||||
restart: always
|
||||
networks:
|
||||
- rdpy_local
|
||||
ports:
|
||||
- "3389:3389"
|
||||
image: "dtagdevsec/rdpy:1710"
|
||||
volumes:
|
||||
- /data/rdpy/log:/var/log/rdpy
|
||||
|
||||
# Spiderfoot service
|
||||
spiderfoot:
|
||||
container_name: spiderfoot
|
||||
restart: always
|
||||
networks:
|
||||
- spiderfoot_local
|
||||
ports:
|
||||
- "127.0.0.1:64303:8080"
|
||||
image: "dtagdevsec/spiderfoot:1710"
|
||||
volumes:
|
||||
- /data/spiderfoot/spiderfoot.db:/home/spiderfoot/spiderfoot.db
|
||||
|
||||
# Ui-for-docker service
|
||||
ui-for-docker:
|
||||
container_name: ui-for-docker
|
||||
command: -H unix:///var/run/docker.sock --no-auth
|
||||
restart: always
|
||||
networks:
|
||||
- ui-for-docker_local
|
||||
ports:
|
||||
- "127.0.0.1:64299:9000"
|
||||
image: "dtagdevsec/ui-for-docker:1710"
|
||||
volumes:
|
||||
- /var/run/docker.sock:/var/run/docker.sock
|
||||
|
||||
# Suricata service
|
||||
suricata:
|
||||
container_name: suricata
|
||||
restart: always
|
||||
network_mode: "host"
|
||||
cap_add:
|
||||
- NET_ADMIN
|
||||
- SYS_NICE
|
||||
- NET_RAW
|
||||
image: "dtagdevsec/suricata:1710"
|
||||
volumes:
|
||||
- /data/suricata/log:/var/log/suricata
|
||||
|
||||
# P0f service
|
||||
p0f:
|
||||
container_name: p0f
|
||||
restart: always
|
||||
network_mode: "host"
|
||||
image: "dtagdevsec/p0f:1710"
|
||||
volumes:
|
||||
- /data/p0f/log:/var/log/p0f
|
||||
|
||||
# Vnclowpot service
|
||||
vnclowpot:
|
||||
container_name: vnclowpot
|
||||
restart: always
|
||||
networks:
|
||||
- vnclowpot_local
|
||||
ports:
|
||||
- "5900:5900"
|
||||
image: "dtagdevsec/vnclowpot:1710"
|
||||
volumes:
|
||||
- /data/vnclowpot/log:/var/log/vnclowpot
|
26
etc/curator/actions.yml
Normal file
26
etc/curator/actions.yml
Normal file
|
@ -0,0 +1,26 @@
|
|||
# Remember, leave a key empty if there is no value. None will be a string,
|
||||
# not a Python "NoneType"
|
||||
#
|
||||
# Also remember that all examples have 'disable_action' set to True. If you
|
||||
# want to use this action as a template, be sure to set this to False after
|
||||
# copying it.
|
||||
actions:
|
||||
1:
|
||||
action: delete_indices
|
||||
description: >-
|
||||
Delete indices older than 90 days (based on index name), for logstash-
|
||||
prefixed indices. Ignore the error if the filter does not result in an
|
||||
actionable list of indices (ignore_empty_list) and exit cleanly.
|
||||
options:
|
||||
ignore_empty_list: True
|
||||
disable_action: False
|
||||
filters:
|
||||
- filtertype: pattern
|
||||
kind: prefix
|
||||
value: logstash-
|
||||
- filtertype: age
|
||||
source: name
|
||||
direction: older
|
||||
timestring: '%Y.%m.%d'
|
||||
unit: days
|
||||
unit_count: 90
|
21
etc/curator/curator.yml
Normal file
21
etc/curator/curator.yml
Normal file
|
@ -0,0 +1,21 @@
|
|||
# Remember, leave a key empty if there is no value. None will be a string,
|
||||
# not a Python "NoneType"
|
||||
client:
|
||||
hosts:
|
||||
- 127.0.0.1
|
||||
port: 64298
|
||||
url_prefix:
|
||||
use_ssl: False
|
||||
certificate:
|
||||
client_cert:
|
||||
client_key:
|
||||
ssl_no_validate: False
|
||||
http_auth:
|
||||
timeout: 30
|
||||
master_only: False
|
||||
|
||||
logging:
|
||||
loglevel: INFO
|
||||
logfile: /var/log/curator.log
|
||||
logformat: default
|
||||
blacklist: ['elasticsearch', 'urllib3']
|
38
etc/logrotate/logrotate.conf
Normal file
38
etc/logrotate/logrotate.conf
Normal file
|
@ -0,0 +1,38 @@
|
|||
/data/conpot/log/conpot.json
|
||||
/data/conpot/log/conpot.log
|
||||
/data/cowrie/log/cowrie.json
|
||||
/data/cowrie/log/cowrie-textlog.log
|
||||
/data/cowrie/log/lastlog.txt
|
||||
/data/cowrie/log/ttylogs.tgz
|
||||
/data/cowrie/downloads.tgz
|
||||
/data/dionaea/log/dionaea.json
|
||||
/data/dionaea/log/dionaea.sqlite
|
||||
/data/dionaea/bistreams.tgz
|
||||
/data/dionaea/binaries.tgz
|
||||
/data/dionaea/dionaea-errors.log
|
||||
/data/elasticpot/log/elasticpot.log
|
||||
/data/elk/log/*.log
|
||||
/data/emobility/log/centralsystem.log
|
||||
/data/emobility/log/centralsystemEWS.log
|
||||
/data/glastopf/log/glastopf.log
|
||||
/data/glastopf/db/glastopf.db
|
||||
/data/honeytrap/log/*.log
|
||||
/data/honeytrap/log/*.json
|
||||
/data/honeytrap/attacks.tgz
|
||||
/data/honeytrap/downloads.tgz
|
||||
/data/mailoney/log/commands.log
|
||||
/data/p0f/log/p0f.json
|
||||
/data/rdpy/log/rdpy.log
|
||||
/data/suricata/log/*.log
|
||||
/data/suricata/log/*.json
|
||||
/data/vnclowpot/log/vnclowpot.log
|
||||
{
|
||||
su tpot tpot
|
||||
copytruncate
|
||||
create 760 tpot tpot
|
||||
daily
|
||||
missingok
|
||||
notifempty
|
||||
rotate 30
|
||||
compress
|
||||
}
|
BIN
etc/objects/elkbase.tgz
Normal file
BIN
etc/objects/elkbase.tgz
Normal file
Binary file not shown.
BIN
etc/objects/kibana-objects.tgz
Normal file
BIN
etc/objects/kibana-objects.tgz
Normal file
Binary file not shown.
144
host/etc/dialogrc
Normal file
@ -0,0 +1,144 @@
#
# Run-time configuration file for dialog
#
# Automatically generated by "dialog --create-rc <file>"
#
#
# Types of values:
#
# Number - <number>
# String - "string"
# Boolean - <ON|OFF>
# Attribute - (foreground,background,highlight?)

# Set aspect-ration.
aspect = 0

# Set separator (for multiple widgets output).
separate_widget = ""

# Set tab-length (for textbox tab-conversion).
tab_len = 0

# Make tab-traversal for checklist, etc., include the list.
visit_items = OFF

# Shadow dialog boxes? This also turns on color.
use_shadow = ON

# Turn color support ON or OFF
use_colors = ON

# Screen color
screen_color = (WHITE,MAGENTA,ON)

# Shadow color
shadow_color = (BLACK,BLACK,ON)

# Dialog box color
dialog_color = (BLACK,WHITE,OFF)

# Dialog box title color
title_color = (MAGENTA,WHITE,OFF)

# Dialog box border color
border_color = (WHITE,WHITE,ON)

# Active button color
button_active_color = (WHITE,MAGENTA,OFF)

# Inactive button color
button_inactive_color = dialog_color

# Active button key color
button_key_active_color = button_active_color

# Inactive button key color
button_key_inactive_color = (RED,WHITE,OFF)

# Active button label color
button_label_active_color = (YELLOW,MAGENTA,ON)

# Inactive button label color
button_label_inactive_color = (BLACK,WHITE,OFF)

# Input box color
inputbox_color = dialog_color

# Input box border color
inputbox_border_color = dialog_color

# Search box color
searchbox_color = dialog_color

# Search box title color
searchbox_title_color = title_color

# Search box border color
searchbox_border_color = border_color

# File position indicator color
position_indicator_color = title_color

# Menu box color
menubox_color = dialog_color

# Menu box border color
menubox_border_color = border_color

# Item color
item_color = dialog_color

# Selected item color
item_selected_color = button_active_color

# Tag color
tag_color = title_color

# Selected tag color
tag_selected_color = button_label_active_color

# Tag key color
tag_key_color = button_key_inactive_color

# Selected tag key color
tag_key_selected_color = (RED,MAGENTA,ON)

# Check box color
check_color = dialog_color

# Selected check box color
check_selected_color = button_active_color

# Up arrow color
uarrow_color = (MAGENTA,WHITE,ON)

# Down arrow color
darrow_color = uarrow_color

# Item help-text color
itemhelp_color = (WHITE,BLACK,OFF)

# Active form text color
form_active_text_color = button_active_color

# Form text color
form_text_color = (WHITE,CYAN,ON)

# Readonly form item color
form_item_readonly_color = (CYAN,WHITE,ON)

# Dialog box gauge color
gauge_color = title_color

# Dialog box border2 color
border2_color = dialog_color

# Input box border2 color
inputbox_border2_color = dialog_color

# Search box border2 color
searchbox_border2_color = dialog_color

# Menu box border2 color
menubox_border2_color = dialog_color
20
host/etc/issue
Normal file
@ -0,0 +1,20 @@
[H[2J
[0;35m┌───────────────[1;35m────────────────[0;37m───────────────┐[0m
[0;35m│[0m [0;35m_____[0m [1;35m____[0m [1;35m_[0m [0;37m_[0m [0;37m_____[0m [0;37m_[0m [0;1;30;90m___[0m [0;1;30;90m│[0m
[1;35m│|_[0m [1;35m_|[0m [1;35m|[0m [1;35m_[0m [0;37m\\[0m [0;37m___[0m [0;37m|[0m [0;37m|_[0m [0;37m/[0m [0;1;30;90m|___[0m [0;1;30;90m/[0m [0;1;30;90m|/[0m [0;1;30;90m_[0m [0;1;30;90m\\[0m [0;1;30;90m│[0m
[1;35m│[0m [1;35m|[0m [1;35m|__[0;37m___|[0m [0;37m|_)[0m [0;37m/[0m [0;37m_[0m [0;37m\\|[0m [0;1;30;90m__|[0m [0;1;30;90m|[0m [0;1;30;90m|[0m [0;1;30;90m/[0m [0;1;30;90m/|[0m [0;35m|[0m [0;35m|[0m [0;35m|[0m [0;35m|│[0m
[0;37m│[0m [0;37m|[0m [0;37m|_____|[0m [0;37m__[0;1;30;90m/[0m [0;1;30;90m(_)[0m [0;1;30;90m|[0m [0;1;30;90m|_[0m [0;1;30;90m|[0m [0;35m|[0m [0;35m/[0m [0;35m/_|[0m [0;35m|[0m [0;35m|_|[0m [0;35m|│[0m
[0;37m│[0m [0;37m|_|[0m [0;1;30;90m|_|[0m [0;1;30;90m\\___/[0m [0;1;30;90m\\[0;35m__|[0m [0;35m|_|/_/(_)_[1;35m|\\___/[0m [1;35m│[0m
[0;1;30;90m│[0m [1;35m│[0m
[0;1;30;90m└───────[0;35m────────────────[1;35m────────────────[0;37m───────┘[0m

,---- [ [1;35m\n[0m ] [ [0;35m\d[0m ] [ [1;30m\t[0m ]
|
| [1;35mIP:[0m
| [0;35mSSH:[0m
| [1;30mWEB:[0m
|
`----
96
host/etc/nginx/nginx.conf
Normal file
@ -0,0 +1,96 @@
user www-data;
worker_processes auto;
pid /run/nginx.pid;

events {
    worker_connections 768;
    # multi_accept on;
}

http {

    ##
    # Basic Settings
    ##

    sendfile on;
    tcp_nopush on;
    tcp_nodelay on;
    keepalive_timeout 65;
    types_hash_max_size 2048;
    # server_tokens off;

    # server_names_hash_bucket_size 64;
    # server_name_in_redirect off;

    include /etc/nginx/mime.types;
    default_type application/octet-stream;

    ##
    # SSL Settings
    ##

    ssl_protocols TLSv1 TLSv1.1 TLSv1.2; # Dropping SSLv3, ref: POODLE
    ssl_prefer_server_ciphers on;

    ##
    # Logging Settings
    ##

    log_format le_json '{ "timestamp": "$time_iso8601", '
        '"src_ip": "$remote_addr", '
        '"remote_user": "$remote_user", '
        '"body_bytes_sent": "$body_bytes_sent", '
        '"request_time": "$request_time", '
        '"status": "$status", '
        '"request": "$request", '
        '"request_method": "$request_method", '
        '"http_referrer": "$http_referer", '
        '"http_user_agent": "$http_user_agent" }';

    access_log /var/log/nginx/access.log le_json;
    error_log /var/log/nginx/error.log;

    ##
    # Gzip Settings
    ##

    gzip on;
    gzip_disable "msie6";

    # gzip_vary on;
    # gzip_proxied any;
    # gzip_comp_level 6;
    # gzip_buffers 16 8k;
    # gzip_http_version 1.1;
    # gzip_types text/plain text/css application/json application/javascript text/xml application/xml application/xml+rss text/javascript;

    ##
    # Virtual Host Configs
    ##

    include /etc/nginx/conf.d/*.conf;
    include /etc/nginx/sites-enabled/*;
}


#mail {
#    # See sample authentication script at:
#    # http://wiki.nginx.org/ImapAuthenticateWithApachePhpScript
#
#    # auth_http localhost/auth.php;
#    # pop3_capabilities "TOP" "USER";
#    # imap_capabilities "IMAP4rev1" "UIDPLUS";
#
#    server {
#        listen localhost:110;
#        protocol pop3;
#        proxy on;
#    }
#
#    server {
#        listen localhost:143;
#        protocol imap;
#        proxy on;
#    }
#}
13
host/etc/nginx/ssl/dhparam4096.pem
Normal file
@ -0,0 +1,13 @@
-----BEGIN DH PARAMETERS-----
MIICCAKCAgEAiHmfakVLOStSULBdaTbZY/zeFyEeQ19GY9Z5CJg06dIIgIzhxk9L
4xsQdQk8giKOjP6SfX0ZgF5CYaurQ3ljYlP0UlAQQo9+fEErbqj3hCzAxtIpd6Yj
SV6zFdnSjwxWuKAPPywiQNljnHH+Y1KBdbl5VQ9gC3ehtaLo1A4y8q96f6fC5rGU
nfgw4lTxLvPD7NwaOdFTCyK8tTxvUGNJIvf7805IxZ0BvAiBuVaXStaMcqf5BHLP
fYpvIiVaCrtto4elu18nL0tf2CN5n9ai4hlr0nPmNrE/Zrrur78Re5F4Ien9kr4d
xabXvVJJQa9j2NdQO7vk7Cz/dAIiqt/1XKFhll4TTYBqrFVXIwF+FNx636zyOjcO
nlZk/V+IL/UTPnZOv2PGt5+WetvJJubi6B9XgOgVLduI07woAp5qnRJJt6fJW1aA
M86By6WLy5P31Py6eFj8nYgj1V703XgQ5lESKYpeVgqA0bh7daNzOCoGQvvUKlTP
RTu6fs7clw5ta4yYUyvuIKTngH5yGBNdTuP0GWo6Y+Dy1BctVwl2xSw+FhYeuIf/
EB2A3129H59HhbWyNH337+1dfntHfQRXBsT0YSyDxPurI5/FNGcmw+GZEYk4BB8j
g7TwH3GBjbKnjnr7SnhanqmWgybgQw6oR9gDC399eR4LiOk9sbxpX1MCAQI=
-----END DH PARAMETERS-----
12
host/etc/nginx/ssl/gen-cert.sh
Normal file
@ -0,0 +1,12 @@
#!/bin/bash

# Got root?
myWHOAMI=$(whoami)
if [ "$myWHOAMI" != "root" ]
  then
    echo "Need to run as root ..."
    exit
fi

openssl req -nodes -x509 -sha512 -newkey rsa:8192 -keyout "nginx.key" -out "nginx.crt" -days 3650
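Run from the directory that should receive nginx.key and nginx.crt, which on an installed host is /etc/nginx/ssl once install.sh has copied the ssl folder there; a sketch, not part of the commit:
  cd /etc/nginx/ssl && ./gen-cert.sh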
16
host/etc/nginx/ssl/gen-dhparam.sh
Normal file
@ -0,0 +1,16 @@
#!/bin/bash

# Got root?
myWHOAMI=$(whoami)
if [ "$myWHOAMI" != "root" ]
  then
    echo "Need to run as root ..."
    exit
fi

if [ "$1" = "2048" ] || [ "$1" = "4096" ] || [ "$1" = "8192" ]
  then
    openssl dhparam -outform PEM -out dhparam$1.pem $1
  else
    echo "Usage: ./gen-dhparam.sh [2048, 4096, 8192]..."
fi
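Example invocation to regenerate the 4096-bit parameters shipped above as dhparam4096.pem (a sketch; 2048 and 8192 are the other accepted sizes):
  cd /etc/nginx/ssl && ./gen-dhparam.sh 4096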
155
host/etc/nginx/tpotweb.conf
Normal file
@ -0,0 +1,155 @@
############################################
### NGINX T-Pot configuration file by mo ###
############################################

###################################
### Allow for 60 reloads per minute
###################################
limit_req_zone $binary_remote_addr zone=base:1m rate=1r/s;

server {

    #########################
    ### Basic server settings
    #########################
    listen 64297 ssl http2;
    index tpotweb.html;
    ssl_protocols TLSv1.2;
    server_name example.com;
    error_page 300 301 302 400 401 402 403 404 500 501 502 503 504 /error.html;


    ##############################################
    ### Remove version number add different header
    ##############################################
    server_tokens off;
    more_set_headers 'Server: apache';


    ##############################################
    ### SSL settings and Cipher Suites
    ##############################################
    ssl_certificate /etc/nginx/ssl/nginx.crt;
    ssl_certificate_key /etc/nginx/ssl/nginx.key;

    ssl_ciphers 'EECDH+AESGCM:EDH+AESGCM:AES256+EECDH:AES256+EDH:!DHE:!SHA:!SHA256';
    ssl_ecdh_curve secp384r1;
    ssl_dhparam /etc/nginx/ssl/dhparam4096.pem;

    ssl_prefer_server_ciphers on;
    ssl_session_cache shared:SSL:10m;


    ####################################
    ### OWASP recommendations / settings
    ####################################

    ### Size Limits & Buffer Overflows
    ### the size may be configured based on the needs.
    client_body_buffer_size 100K;
    client_header_buffer_size 1k;
    client_max_body_size 100k;
    large_client_header_buffers 2 1k;

    ### Mitigate Slow HTTP DoS Attack
    ### Timeouts definition ##
    client_body_timeout 10;
    client_header_timeout 10;
    keepalive_timeout 5 5;
    send_timeout 10;

    ### X-Frame-Options is to prevent from clickjacking attack
    add_header X-Frame-Options SAMEORIGIN;

    ### disable content-type sniffing on some browsers.
    add_header X-Content-Type-Options nosniff;

    ### This header enables the Cross-site scripting (XSS) filter
    add_header X-XSS-Protection "1; mode=block";

    ### This will enforce HTTP browsing into HTTPS and avoid ssl stripping attack
    add_header Strict-Transport-Security "max-age=31536000; includeSubdomains;";


    ##################################
    ### Restrict access and basic auth
    ##################################

    # satisfy all;
    satisfy any;

    # allow 10.0.0.0/8;
    # allow 172.16.0.0/12;
    # allow 192.168.0.0/16;
    allow 127.0.0.1;
    allow ::1;
    deny all;

    auth_basic "closed site";
    auth_basic_user_file /etc/nginx/nginxpasswd;


    ##############################
    ### Limit brute-force attempts
    ##############################
    location = / {
        limit_req zone=base burst=1 nodelay;
    }


    #################
    ### Proxied sites
    #################

    ### Kibana
    location /kibana/ {
        proxy_pass http://localhost:64296;
        rewrite /kibana/(.*)$ /$1 break;
    }

    ### ES
    location /es/ {
        proxy_pass http://localhost:64298/;
        rewrite /es/(.*)$ /$1 break;
    }

    ### head standalone
    location /myhead/ {
        proxy_pass http://localhost:64302/;
        rewrite /myhead/(.*)$ /$1 break;
    }

    ### portainer
    location /ui {
        proxy_pass http://127.0.0.1:64299;
        proxy_http_version 1.1;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection $http_connection;
        proxy_set_header Host $host;
        proxy_redirect off;
        rewrite /ui/(.*)$ /$1 break;
    }

    ### web tty
    location /wetty {
        proxy_pass http://127.0.0.1:64300/wetty;
    }

    ### netdata
    location /netdata/ {
        proxy_pass http://localhost:64301;
        rewrite /netdata/(.*)$ /$1 break;
    }

    ### spiderfoot
    location /spiderfoot {
        proxy_pass http://127.0.0.1:64303;
    }

    location /static {
        proxy_pass http://127.0.0.1:64303/spiderfoot/static;
    }

    location /scanviz {
        proxy_pass http://127.0.0.1:64303/spiderfoot/scanviz;
    }
}
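The auth_basic_user_file referenced above is not shipped in this commit; it is created at install time with htpasswd. A sketch using the same call install.sh issues, with placeholder credentials:
  htpasswd -b -c /etc/nginx/nginxpasswd <webuser> <password>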
2
host/etc/rc.local
Executable file
@ -0,0 +1,2 @@
#!/bin/bash
exit 0
57
host/etc/systemd/tpot.service
Normal file
@ -0,0 +1,57 @@
[Unit]
Description=tpot
Requires=docker.service
After=docker.service

[Service]
Restart=always
RestartSec=5

# Get and set internal, external IP infos, but ignore errors
ExecStartPre=-/opt/tpot/bin/updateip.sh

# Clear state or if persistence is enabled rotate and compress logs from /data
ExecStartPre=-/bin/bash -c '/opt/tpot/bin/clean.sh on'

# Remove old containers, images and volumes
ExecStartPre=-/usr/local/bin/docker-compose -f /opt/tpot/etc/tpot.yml down -v
ExecStartPre=-/usr/local/bin/docker-compose -f /opt/tpot/etc/tpot.yml rm -v
ExecStartPre=-/bin/bash -c 'docker volume rm $(docker volume ls -q)'
ExecStartPre=-/bin/bash -c 'docker rm -v $(docker ps -aq)'
ExecStartPre=-/bin/bash -c 'docker rmi $(docker images | grep "<none>" | awk \'{print $3}\')'

# Get IF, disable offloading, enable promiscuous mode for p0f and suricata
ExecStartPre=/bin/bash -c '/sbin/ethtool --offload $(/sbin/ip address | grep "^2: " | awk \'{ print $2 }\' | tr -d [:punct:]) rx off tx off'
ExecStartPre=/bin/bash -c '/sbin/ethtool -K $(/sbin/ip address | grep "^2: " | awk \'{ print $2 }\' | tr -d [:punct:]) gso off gro off'
ExecStartPre=/bin/bash -c '/sbin/ip link set $(/sbin/ip address | grep "^2: " | awk \'{ print $2 }\' | tr -d [:punct:]) promisc on'

# Modify access rights on docker.sock for netdata
ExecStartPre=-/bin/chmod 666 /var/run/docker.sock

# Set iptables accept rules to avoid forwarding to honeytrap / NFQUEUE
# Forward all other connections to honeytrap / NFQUEUE
ExecStartPre=/sbin/iptables -w -A INPUT -s 127.0.0.1 -j ACCEPT
ExecStartPre=/sbin/iptables -w -A INPUT -d 127.0.0.1 -j ACCEPT
ExecStartPre=/sbin/iptables -w -A INPUT -p tcp -m multiport --dports 64295:64303,7634 -j ACCEPT
ExecStartPre=/sbin/iptables -w -A INPUT -p tcp -m multiport --dports 20:23,25,42,69,80,135,443,445,1433,1723,1883,1900 -j ACCEPT
ExecStartPre=/sbin/iptables -w -A INPUT -p tcp -m multiport --dports 3306,3389,5060,5061,5601,5900,27017 -j ACCEPT
ExecStartPre=/sbin/iptables -w -A INPUT -p tcp -m multiport --dports 1025,50100,8080,8081,9200 -j ACCEPT
ExecStartPre=/sbin/iptables -w -A INPUT -p tcp --syn -m state --state NEW -j NFQUEUE

# Compose T-Pot up
ExecStart=/usr/local/bin/docker-compose -f /opt/tpot/etc/tpot.yml up --no-color

# Compose T-Pot down, remove containers and volumes
ExecStop=/usr/local/bin/docker-compose -f /opt/tpot/etc/tpot.yml down -v

# Remove only previously set iptables rules
ExecStopPost=/sbin/iptables -w -D INPUT -s 127.0.0.1 -j ACCEPT
ExecStopPost=/sbin/iptables -w -D INPUT -d 127.0.0.1 -j ACCEPT
ExecStopPost=/sbin/iptables -w -D INPUT -p tcp -m multiport --dports 64295:64303,7634 -j ACCEPT
ExecStopPost=/sbin/iptables -w -D INPUT -p tcp -m multiport --dports 20:23,25,42,69,80,135,443,445,1433,1723,1883,1900 -j ACCEPT
ExecStopPost=/sbin/iptables -w -D INPUT -p tcp -m multiport --dports 3306,3389,5060,5061,5601,5900,27017 -j ACCEPT
ExecStopPost=/sbin/iptables -w -D INPUT -p tcp -m multiport --dports 1025,50100,8080,8081,9200 -j ACCEPT
ExecStopPost=/sbin/iptables -w -D INPUT -p tcp --syn -m state --state NEW -j NFQUEUE

[Install]
WantedBy=multi-user.target
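Once copied to /etc/systemd/system, as install.sh does, the unit follows the usual systemd lifecycle; a short sketch:
  systemctl enable tpot
  systemctl start tpot
  systemctl stop tpot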
13
host/etc/systemd/wetty.service
Normal file
@ -0,0 +1,13 @@
[Unit]
Description=wetty
Requires=sshd.service
After=sshd.service

[Service]
Restart=always
User=tsec
Group=tsec
ExecStart=/usr/bin/node /usr/local/lib/node_modules/wetty/app.js -p 64300 --host 127.0.0.1 --sshhost 127.0.0.1 --sshport 64295

[Install]
WantedBy=multi-user.target
1466
host/usr/share/dict/a.txt
Normal file
File diff suppressed because it is too large
4401
host/usr/share/dict/n.txt
Normal file
File diff suppressed because it is too large
3947
host/usr/share/dict/names
Normal file
File diff suppressed because it is too large
0
host/usr/share/nginx/html/error.html
Normal file
BIN
host/usr/share/nginx/html/favicon.ico
Normal file
Binary file not shown.
21
host/usr/share/nginx/html/navbar.html
Normal file
@ -0,0 +1,21 @@
<!DOCTYPE html>
<html lang="en_US">
<head>
  <meta charset="utf-8">
  <meta name="viewport" content="width=device-width, initial-scale=1.0">
  <title>T-Pot</title>
</head>
<link href="style.css" rel="stylesheet" type="text/css"/>

<body bgcolor="#E20074">
<center>
  <a href="/tpotweb.html" target="_top" class="btn">Home</a>
  <a href="/kibana" target="main" class="btn">Kibana</a>
  <a href="/myhead/" target="main" class="btn">ES Head</a>
  <a href="/netdata/" target="_blank" class="btn">Netdata</a>
  <a href="/spiderfoot/" target="main" class="btn">Spiderfoot</a>
  <a href="/ui/" target="main" class="btn">Portainer</a>
  <a href="/wetty/ssh/tsec" target="main" class="btn">WebTTY</a>
</center>
</body>
</html>
17
host/usr/share/nginx/html/style.css
Normal file
@ -0,0 +1,17 @@
.btn {
  -webkit-border-radius: 0;
  -moz-border-radius: 0;
  border-radius: 0px;
  font-family: Arial;
  color: #ffffff;
  font-size: 12px;
  background: #E20074;
  padding: 2px 30px 2px 30px;
  text-decoration: none;
}

.btn:hover {
  background: #c2c2c2;
  text-decoration: none;
}
15
host/usr/share/nginx/html/tpotweb.html
Normal file
@ -0,0 +1,15 @@
<!DOCTYPE html>
<html lang="en_US">
<head>
  <meta charset="utf-8">
  <meta name="viewport" content="width=device-width, initial-scale=1.0">
  <title>T-Pot</title>
</head>

<frameset rows='20,*' border='0' frameborder='0' framespacing='0'>
  <frame src='navbar.html' name='navbar' marginwidth='0' marginheight='0' scrolling='no' noresize>
  <frame src='/kibana' name='main' marginwidth='0' marginheight='0' scrolling='auto' noresize>
  <noframes>
  </noframes>
</frameset>
</html>
144
iso/installer/dialogrc
Normal file
@ -0,0 +1,144 @@
(file content identical to host/etc/dialogrc above)
509
iso/installer/install.sh
Executable file
@ -0,0 +1,509 @@
#!/bin/bash
# T-Pot post install script

# Set TERM, DIALOGRC
export TERM=linux
export DIALOGRC=/etc/dialogrc

# Let's load dialog color theme
cp /root/installer/dialogrc /etc/

# Some global vars
myPROXYFILEPATH="/root/installer/proxy"
myNTPCONFPATH="/root/installer/ntp"
myPFXPATH="/root/installer/keys/8021x.pfx"
myPFXPWPATH="/root/installer/keys/8021x.pw"
myPFXHOSTIDPATH="/root/installer/keys/8021x.id"
myTPOTCOMPOSE="/opt/tpot/etc/tpot.yml"
myBACKTITLE="T-Pot-Installer"
mySITES="https://index.docker.io https://github.com https://pypi.python.org https://ubuntu.com"
myPROGRESSBOXCONF=" --backtitle "$myBACKTITLE" --progressbox 24 80"

fuRANDOMWORD () {
local myWORDFILE="$1"
local myLINES=$(cat $myWORDFILE | wc -l)
local myRANDOM=$((RANDOM % $myLINES))
local myNUM=$((myRANDOM * myRANDOM % $myLINES + 1))
echo -n $(sed -n "$myNUM p" $myWORDFILE | tr -d \' | tr A-Z a-z)
}

# Let's wait a few seconds to avoid interference with service messages
sleep 3
tput civis
dialog --no-ok --no-cancel --backtitle "$myBACKTITLE" --title "[ Wait to avoid interference with service messages ]" --pause "" 6 80 7

# Let's setup the proxy for env
if [ -f $myPROXYFILEPATH ];
then
dialog --title "[ Setting up the proxy ]" $myPROGRESSBOXCONF <<EOF
EOF
myPROXY=$(cat $myPROXYFILEPATH)
tee -a /etc/environment 2>&1>/dev/null <<EOF
export http_proxy=$myPROXY
export https_proxy=$myPROXY
export HTTP_PROXY=$myPROXY
export HTTPS_PROXY=$myPROXY
export no_proxy=localhost,127.0.0.1,.sock
EOF
source /etc/environment

# Let's setup the proxy for apt
tee /etc/apt/apt.conf 2>&1>/dev/null <<EOF
Acquire::http::Proxy "$myPROXY";
Acquire::https::Proxy "$myPROXY";
EOF

# Let's add proxy settings to docker defaults
myPROXY=$(cat $myPROXYFILEPATH)
tee -a /etc/default/docker 2>&1>/dev/null <<EOF
http_proxy=$myPROXY
https_proxy=$myPROXY
HTTP_PROXY=$myPROXY
HTTPS_PROXY=$myPROXY
no_proxy=localhost,127.0.0.1,.sock
EOF

# Let's restart docker for proxy changes to take effect
systemctl stop docker 2>&1 | dialog --title "[ Stop docker service ]" $myPROGRESSBOXCONF
systemctl start docker 2>&1 | dialog --title "[ Start docker service ]" $myPROGRESSBOXCONF
fi

# Let's test the internet connection
mySITESCOUNT=$(echo $mySITES | wc -w)
j=0
for i in $mySITES;
do
dialog --title "[ Testing the internet connection ]" --backtitle "$myBACKTITLE" \
--gauge "\n Now checking: $i\n" 8 80 $(expr 100 \* $j / $mySITESCOUNT) <<EOF
EOF
curl --connect-timeout 5 -IsS $i 2>&1>/dev/null
if [ $? -ne 0 ];
then
dialog --backtitle "$myBACKTITLE" --title "[ Continue? ]" --yesno "\nInternet connection test failed. This might indicate some problems with your connection. You can continue, but the installation might fail." 10 50
if [ $? = 1 ];
then
dialog --backtitle "$myBACKTITLE" --title "[ Abort ]" --msgbox "\nInstallation aborted. Exiting the installer." 7 50
exit
else
break;
fi;
fi;
let j+=1
dialog --title "[ Testing the internet connection ]" --backtitle "$myBACKTITLE" \
--gauge "\n Now checking: $i\n" 8 80 $(expr 100 \* $j / $mySITESCOUNT) <<EOF
EOF
done;

# Let's remove NGINX default website
#fuECHO "### Removing NGINX default website."
rm -rf /etc/nginx/sites-enabled/default 2>&1 | dialog --title "[ Removing NGINX default website. ]" $myPROGRESSBOXCONF;
rm -rf /etc/nginx/sites-available/default 2>&1 | dialog --title "[ Removing NGINX default website. ]" $myPROGRESSBOXCONF;
rm -rf /usr/share/nginx/html/index.html 2>&1 | dialog --title "[ Removing NGINX default website. ]" $myPROGRESSBOXCONF;

# Let's ask user for install flavor
# Install types are TPOT, HP, INDUSTRIAL, EVERYTHING
tput cnorm
myFLAVOR=$(dialog --no-cancel --backtitle "$myBACKTITLE" --title "[ Choose your edition ]" --no-tags --menu \
"\nRequired: 4GB RAM, 64GB disk\nRecommended: 8GB RAM, 128GB SSD" 14 60 4 \
"TPOT" "Standard Honeypots, Suricata & ELK" \
"HP" "Honeypots only, w/o Suricata & ELK" \
"INDUSTRIAL" "Conpot, eMobility, Suricata & ELK" \
"EVERYTHING" "Everything" 3>&1 1>&2 2>&3 3>&-)

# Let's ask for a secure tsec password
myUSER="tsec"
myPASS1="pass1"
myPASS2="pass2"
mySECURE="0"
while [ "$myPASS1" != "$myPASS2" ] && [ "$mySECURE" == "0" ]
do
while [ "$myPASS1" == "pass1" ] || [ "$myPASS1" == "" ]
do
myPASS1=$(dialog --insecure --backtitle "$myBACKTITLE" \
--title "[ Enter password for console user (tsec) ]" \
--passwordbox "\nPassword" 9 60 3>&1 1>&2 2>&3 3>&-)
done
myPASS2=$(dialog --insecure --backtitle "$myBACKTITLE" \
--title "[ Repeat password for console user (tsec) ]" \
--passwordbox "\nPassword" 9 60 3>&1 1>&2 2>&3 3>&-)
if [ "$myPASS1" != "$myPASS2" ];
then
dialog --backtitle "$myBACKTITLE" --title "[ Passwords do not match. ]" \
--msgbox "\nPlease re-enter your password." 7 60
myPASS1="pass1"
myPASS2="pass2"
fi
mySECURE=$(printf "%s" "$myPASS1" | cracklib-check | grep -c "OK")
if [ "$mySECURE" == "0" ] && [ "$myPASS1" == "$myPASS2" ];
then
dialog --backtitle "$myBACKTITLE" --title "[ Password is not secure ]" --defaultno --yesno "\nKeep insecure password?" 7 50
myOK=$?
if [ "$myOK" == "1" ];
then
myPASS1="pass1"
myPASS2="pass2"
fi
fi
done
printf "%s" "$myUSER:$myPASS1" | chpasswd

# Let's ask for a web username with secure password
myOK="1"
myUSER="tsec"
myPASS1="pass1"
myPASS2="pass2"
mySECURE="0"
while [ 1 != 2 ]
do
myUSER=$(dialog --backtitle "$myBACKTITLE" --title "[ Enter your web user name ]" --inputbox "\nUsername (tsec not allowed)" 9 50 3>&1 1>&2 2>&3 3>&-)
myUSER=$(echo $myUSER | tr -cd "[:alnum:]_.-")
dialog --backtitle "$myBACKTITLE" --title "[ Your username is ]" --yesno "\n$myUSER" 7 50
myOK=$?
if [ "$myOK" = "0" ] && [ "$myUSER" != "tsec" ] && [ "$myUSER" != "" ];
then
break
fi
done
while [ "$myPASS1" != "$myPASS2" ] && [ "$mySECURE" == "0" ]
do
while [ "$myPASS1" == "pass1" ] || [ "$myPASS1" == "" ]
do
myPASS1=$(dialog --insecure --backtitle "$myBACKTITLE" \
--title "[ Enter password for your web user ]" \
--passwordbox "\nPassword" 9 60 3>&1 1>&2 2>&3 3>&-)
done
myPASS2=$(dialog --insecure --backtitle "$myBACKTITLE" \
--title "[ Repeat password for your web user ]" \
--passwordbox "\nPassword" 9 60 3>&1 1>&2 2>&3 3>&-)
if [ "$myPASS1" != "$myPASS2" ];
then
dialog --backtitle "$myBACKTITLE" --title "[ Passwords do not match. ]" \
--msgbox "\nPlease re-enter your password." 7 60
myPASS1="pass1"
myPASS2="pass2"
fi
mySECURE=$(printf "%s" "$myPASS1" | cracklib-check | grep -c "OK")
if [ "$mySECURE" == "0" ] && [ "$myPASS1" == "$myPASS2" ];
then
dialog --backtitle "$myBACKTITLE" --title "[ Password is not secure ]" --defaultno --yesno "\nKeep insecure password?" 7 50
myOK=$?
if [ "$myOK" == "1" ];
then
myPASS1="pass1"
myPASS2="pass2"
fi
fi
done
htpasswd -b -c /etc/nginx/nginxpasswd "$myUSER" "$myPASS1" 2>&1 | dialog --title "[ Setting up user and password ]" $myPROGRESSBOXCONF;

# Let's generate a SSL self-signed certificate without interaction (browsers will see it invalid anyway)
tput civis
mkdir -p /etc/nginx/ssl 2>&1 | dialog --title "[ Generating a self-signed-certificate for NGINX ]" $myPROGRESSBOXCONF;
openssl req \
-nodes \
-x509 \
-sha512 \
-newkey rsa:8192 \
-keyout "/etc/nginx/ssl/nginx.key" \
-out "/etc/nginx/ssl/nginx.crt" \
-days 3650 \
-subj '/C=AU/ST=Some-State/O=Internet Widgits Pty Ltd' 2>&1 | dialog --title "[ Generating a self-signed-certificate for NGINX ]" $myPROGRESSBOXCONF;

# Let's setup the ntp server
if [ -f $myNTPCONFPATH ];
then
dialog --title "[ Setting up the ntp server ]" $myPROGRESSBOXCONF <<EOF
EOF
cp $myNTPCONFPATH /etc/ntp.conf 2>&1 | dialog --title "[ Setting up the ntp server ]" $myPROGRESSBOXCONF
fi

# Let's setup 802.1x networking
if [ -f $myPFXPATH ];
then
dialog --title "[ Setting 802.1x networking ]" $myPROGRESSBOXCONF <<EOF
EOF
cp $myPFXPATH /etc/wpa_supplicant/ 2>&1 | dialog --title "[ Setting 802.1x networking ]" $myPROGRESSBOXCONF
if [ -f $myPFXPWPATH ];
then
dialog --title "[ Setting up 802.1x password ]" $myPROGRESSBOXCONF <<EOF
EOF
myPFXPW=$(cat $myPFXPWPATH)
fi
myPFXHOSTID=$(cat $myPFXHOSTIDPATH)
tee -a /etc/network/interfaces 2>&1>/dev/null <<EOF
wpa-driver wired
wpa-conf /etc/wpa_supplicant/wired8021x.conf

### Example wireless config for 802.1x
### This configuration was tested with the IntelNUC series
### If problems occur you can try and change wpa-driver to "iwlwifi"
### Do not forget to enter a ssid in /etc/wpa_supplicant/wireless8021x.conf
### The Intel NUC uses wlpXsY notation instead of wlanX
#
#auto wlp2s0
#iface wlp2s0 inet dhcp
# wpa-driver wext
# wpa-conf /etc/wpa_supplicant/wireless8021x.conf
EOF

tee /etc/wpa_supplicant/wired8021x.conf 2>&1>/dev/null <<EOF
ctrl_interface=/var/run/wpa_supplicant
ctrl_interface_group=root
eapol_version=1
ap_scan=1
network={
key_mgmt=IEEE8021X
eap=TLS
identity="host/$myPFXHOSTID"
private_key="/etc/wpa_supplicant/8021x.pfx"
private_key_passwd="$myPFXPW"
}
EOF

tee /etc/wpa_supplicant/wireless8021x.conf 2>&1>/dev/null <<EOF
ctrl_interface=/var/run/wpa_supplicant
ctrl_interface_group=root
eapol_version=1
ap_scan=1
network={
ssid="<your_ssid_here_without_brackets>"
key_mgmt=WPA-EAP
pairwise=CCMP
group=CCMP
eap=TLS
identity="host/$myPFXHOSTID"
private_key="/etc/wpa_supplicant/8021x.pfx"
private_key_passwd="$myPFXPW"
}
EOF
fi

# Let's provide a wireless example config ...
echo "### Providing static ip, wireless example config."
tee -a /etc/network/interfaces 2>&1>/dev/null <<EOF

### Example static ip config
### Replace <eth0> with the name of your physical interface name
#
#auto eth0
#iface eth0 inet static
# address 192.168.1.1
# netmask 255.255.255.0
# network 192.168.1.0
# broadcast 192.168.1.255
# gateway 192.168.1.1
# dns-nameservers 192.168.1.1

### Example wireless config without 802.1x
### This configuration was tested with the IntelNUC series
### If problems occur you can try and change wpa-driver to "iwlwifi"
#
#auto wlan0
#iface wlan0 inet dhcp
# wpa-driver wext
# wpa-ssid <your_ssid_here_without_brackets>
# wpa-ap-scan 1
# wpa-proto RSN
# wpa-pairwise CCMP
# wpa-group CCMP
# wpa-key-mgmt WPA-PSK
# wpa-psk "<your_password_here_without_brackets>"
EOF

# Let's modify the sources list
sed -i '/cdrom/d' /etc/apt/sources.list

# Let's make sure SSH roaming is turned off (CVE-2016-0777, CVE-2016-0778)
echo "### Let's make sure SSH roaming is turned off."
tee -a /etc/ssh/ssh_config 2>&1>/dev/null <<EOF
UseRoaming no
EOF

# Let's pull some updates
apt-get update -y 2>&1 | dialog --title "[ Pulling updates ]" $myPROGRESSBOXCONF
apt-get upgrade -y 2>&1 | dialog --title "[ Pulling updates ]" $myPROGRESSBOXCONF

# Let's clean up apt
apt-get autoclean -y 2>&1 | dialog --title "[ Pulling updates ]" $myPROGRESSBOXCONF
apt-get autoremove -y 2>&1 | dialog --title "[ Pulling updates ]" $myPROGRESSBOXCONF

# Installing docker-compose, wetty, ctop, elasticdump, tpot
pip install --upgrade pip 2>&1 | dialog --title "[ Installing pip ]" $myPROGRESSBOXCONF
pip install docker-compose==1.12.0 2>&1 | dialog --title "[ Installing docker-compose ]" $myPROGRESSBOXCONF
pip install elasticsearch-curator==5.1.1 2>&1 | dialog --title "[ Installing elasticsearch-curator ]" $myPROGRESSBOXCONF
ln -s /usr/bin/nodejs /usr/bin/node 2>&1 | dialog --title "[ Installing wetty ]" $myPROGRESSBOXCONF
npm install https://github.com/t3chn0m4g3/wetty -g 2>&1 | dialog --title "[ Installing wetty ]" $myPROGRESSBOXCONF
npm install https://github.com/t3chn0m4g3/elasticsearch-dump -g 2>&1 | dialog --title "[ Installing elasticsearch-dump ]" $myPROGRESSBOXCONF
wget https://github.com/bcicen/ctop/releases/download/v0.6.1/ctop-0.6.1-linux-amd64 -O ctop 2>&1 | dialog --title "[ Installing ctop ]" $myPROGRESSBOXCONF
git clone https://github.com/dtag-dev-sec/tpotce -b autoupdate /opt/tpot 2>&1 | dialog --title "[ Cloning T-Pot ]" $myPROGRESSBOXCONF
mv ctop /usr/bin/ 2>&1 | dialog --title "[ Installing ctop ]" $myPROGRESSBOXCONF
chmod +x /usr/bin/ctop 2>&1 | dialog --title "[ Installing ctop ]" $myPROGRESSBOXCONF
# Let's add a new user
addgroup --gid 2000 tpot 2>&1 | dialog --title "[ Adding new user ]" $myPROGRESSBOXCONF
adduser --system --no-create-home --uid 2000 --disabled-password --disabled-login --gid 2000 tpot 2>&1 | dialog --title "[ Adding new user ]" $myPROGRESSBOXCONF

# Let's set the hostname
a=$(fuRANDOMWORD /opt/tpot/host/usr/share/dict/a.txt)
n=$(fuRANDOMWORD /opt/tpot/host/usr/share/dict/n.txt)
myHOST=$a$n
hostnamectl set-hostname $myHOST 2>&1 | dialog --title "[ Setting new hostname ]" $myPROGRESSBOXCONF
sed -i 's#127.0.1.1.*#127.0.1.1\t'"$myHOST"'#g' /etc/hosts 2>&1 | dialog --title "[ Setting new hostname ]" $myPROGRESSBOXCONF

# Let's patch sshd_config
sed -i 's#Port 22#Port 64295#' /etc/ssh/sshd_config 2>&1 | dialog --title "[ SSH listen on tcp/64295 ]" $myPROGRESSBOXCONF
sed -i 's#\#PasswordAuthentication yes#PasswordAuthentication no#' /etc/ssh/sshd_config 2>&1 | dialog --title "[ SSH password authentication only from RFC1918 networks ]" $myPROGRESSBOXCONF
tee -a /etc/ssh/sshd_config 2>&1>/dev/null <<EOF


Match address 127.0.0.1,10.0.0.0/8,172.16.0.0/12,192.168.0.0/16
PasswordAuthentication yes
EOF

# Let's make sure only myFLAVOR images will be downloaded and started
case $myFLAVOR in
HP)
echo "### Preparing HONEYPOT flavor installation."
cp /opt/tpot/etc/compose/hp.yml $myTPOTCOMPOSE 2>&1>/dev/null
;;
INDUSTRIAL)
echo "### Preparing INDUSTRIAL flavor installation."
cp /opt/tpot/etc/compose/industrial.yml $myTPOTCOMPOSE 2>&1>/dev/null
;;
TPOT)
echo "### Preparing TPOT flavor installation."
cp /opt/tpot/etc/compose/tpot.yml $myTPOTCOMPOSE 2>&1>/dev/null
;;
EVERYTHING)
echo "### Preparing EVERYTHING flavor installation."
cp /opt/tpot/etc/compose/all.yml $myTPOTCOMPOSE 2>&1>/dev/null
;;
esac

# Let's load docker images
myIMAGESCOUNT=$(cat $myTPOTCOMPOSE | grep -v '#' | grep image | cut -d: -f2 | wc -l)
j=0
for name in $(cat $myTPOTCOMPOSE | grep -v '#' | grep image | cut -d'"' -f2)
do
dialog --title "[ Downloading docker images, please be patient ]" --backtitle "$myBACKTITLE" \
--gauge "\n Now downloading: $name\n" 8 80 $(expr 100 \* $j / $myIMAGESCOUNT) <<EOF
EOF
docker pull $name 2>&1>/dev/null
let j+=1
dialog --title "[ Downloading docker images, please be patient ]" --backtitle "$myBACKTITLE" \
--gauge "\n Now downloading: $name\n" 8 80 $(expr 100 \* $j / $myIMAGESCOUNT) <<EOF
EOF
done

# Let's add the daily update check with a weekly clean interval
dialog --title "[ Modifying update checks ]" $myPROGRESSBOXCONF <<EOF
EOF
tee /etc/apt/apt.conf.d/10periodic 2>&1>/dev/null <<EOF
APT::Periodic::Update-Package-Lists "1";
APT::Periodic::Download-Upgradeable-Packages "0";
APT::Periodic::AutocleanInterval "7";
EOF

# Let's make sure to reboot the system after a kernel panic
dialog --title "[ Reboot after kernel panic ]" $myPROGRESSBOXCONF <<EOF
EOF
tee -a /etc/sysctl.conf 2>&1>/dev/null <<EOF

# Reboot after kernel panic, check via /proc/sys/kernel/panic[_on_oops]
# Set required map count for ELK
kernel.panic = 1
kernel.panic_on_oops = 1
vm.max_map_count = 262144
net.ipv6.conf.all.disable_ipv6 = 1
net.ipv6.conf.default.disable_ipv6 = 1
net.ipv6.conf.lo.disable_ipv6 = 1
EOF

# Let's add some cronjobs
dialog --title "[ Adding cronjobs ]" $myPROGRESSBOXCONF <<EOF
EOF
tee -a /etc/crontab 2>&1>/dev/null <<EOF

# Check if updated images are available and download them
27 1 * * * root /usr/bin/docker-compose -f /opt/tpot/etc/tpot.yml pull

# Delete elasticsearch logstash indices older than 90 days
27 4 * * * root /usr/local/bin/curator --config /opt/tpot/etc/curator/curator.yml /opt/tpot/etc/curator/actions.yml

# Uploaded binaries are not supposed to be downloaded
*/1 * * * * root mv --backup=numbered /data/dionaea/roots/ftp/* /data/dionaea/binaries/

# Daily reboot
27 3 * * * root reboot

# Check for updated packages every sunday, upgrade and reboot
27 16 * * 0 root apt-get autoclean -y && apt-get autoremove -y && apt-get update -y && apt-get upgrade -y && sleep 10 && reboot
EOF

# Let's create some files and folders
mkdir -p /data/conpot/log \
/data/cowrie/log/tty/ /data/cowrie/downloads/ /data/cowrie/keys/ /data/cowrie/misc/ \
/data/dionaea/log /data/dionaea/bistreams /data/dionaea/binaries /data/dionaea/rtp /data/dionaea/roots/ftp /data/dionaea/roots/tftp /data/dionaea/roots/www /data/dionaea/roots/upnp \
/data/elasticpot/log \
/data/elk/data /data/elk/log \
/data/glastopf /data/honeytrap/log/ /data/honeytrap/attacks/ /data/honeytrap/downloads/ \
/data/mailoney/log \
/data/emobility/log \
/data/ews/conf \
/data/rdpy/log \
/data/spiderfoot \
/data/suricata/log /home/tsec/.ssh/ \
/data/p0f/log \
/data/vnclowpot/log 2>&1 | dialog --title "[ Creating some files and folders ]" $myPROGRESSBOXCONF
touch /data/spiderfoot/spiderfoot.db 2>&1 | dialog --title "[ Creating some files and folders ]" $myPROGRESSBOXCONF

# Let's copy some files
tar xvfz /opt/tpot/etc/objects/elkbase.tgz -C / 2>&1 | dialog --title "[ Extracting elkbase.tgz ]" $myPROGRESSBOXCONF
cp /opt/tpot/host/etc/systemd/* /etc/systemd/system/ 2>&1 | dialog --title "[ Copy configs ]" $myPROGRESSBOXCONF
cp /opt/tpot/host/etc/issue /etc/ 2>&1 | dialog --title "[ Copy configs ]" $myPROGRESSBOXCONF
cp -R /opt/tpot/host/etc/nginx/ssl /etc/nginx/ 2>&1 | dialog --title "[ Copy configs ]" $myPROGRESSBOXCONF
cp /opt/tpot/host/etc/nginx/tpotweb.conf /etc/nginx/sites-available/ 2>&1 | dialog --title "[ Copy configs ]" $myPROGRESSBOXCONF
cp /opt/tpot/host/etc/nginx/nginx.conf /etc/nginx/nginx.conf 2>&1 | dialog --title "[ Copy configs ]" $myPROGRESSBOXCONF
cp /opt/tpot/host/usr/share/nginx/html/* /usr/share/nginx/html/ 2>&1 | dialog --title "[ Copy configs ]" $myPROGRESSBOXCONF
cp /root/installer/keys/authorized_keys /home/tsec/.ssh/authorized_keys 2>&1 | dialog --title "[ Copy configs ]" $myPROGRESSBOXCONF
systemctl enable tpot 2>&1 | dialog --title "[ Enabling service for tpot ]" $myPROGRESSBOXCONF
systemctl enable wetty 2>&1 | dialog --title "[ Enabling service for wetty ]" $myPROGRESSBOXCONF

# Let's enable T-Pot website
ln -s /etc/nginx/sites-available/tpotweb.conf /etc/nginx/sites-enabled/tpotweb.conf 2>&1 | dialog --title "[ Enabling T-Pot website ]" $myPROGRESSBOXCONF

# Let's take care of some files and permissions
chmod 760 -R /data 2>&1 | dialog --title "[ Set permissions and ownerships ]" $myPROGRESSBOXCONF
chown tpot:tpot -R /data 2>&1 | dialog --title "[ Set permissions and ownerships ]" $myPROGRESSBOXCONF
chmod 600 /home/tsec/.ssh/authorized_keys 2>&1 | dialog --title "[ Set permissions and ownerships ]" $myPROGRESSBOXCONF
chown tsec:tsec /home/tsec/.ssh /home/tsec/.ssh/authorized_keys 2>&1 | dialog --title "[ Set permissions and ownerships ]" $myPROGRESSBOXCONF

# Let's replace "quiet splash" options, set a console font for more screen canvas and update grub
sed -i 's#GRUB_CMDLINE_LINUX_DEFAULT="quiet splash"#GRUB_CMDLINE_LINUX_DEFAULT="consoleblank=0"#' /etc/default/grub 2>&1>/dev/null
sed -i 's#GRUB_CMDLINE_LINUX=""#GRUB_CMDLINE_LINUX="cgroup_enable=memory swapaccount=1"#' /etc/default/grub 2>&1>/dev/null
update-grub 2>&1 | dialog --title "[ Update grub ]" $myPROGRESSBOXCONF
cp /usr/share/consolefonts/Uni2-Terminus12x6.psf.gz /etc/console-setup/
gunzip /etc/console-setup/Uni2-Terminus12x6.psf.gz
sed -i 's#FONTFACE=".*#FONTFACE="Terminus"#' /etc/default/console-setup
sed -i 's#FONTSIZE=".*#FONTSIZE="12x6"#' /etc/default/console-setup
update-initramfs -u 2>&1 | dialog --title "[ Update initramfs ]" $myPROGRESSBOXCONF

# Let's enable a color prompt and add /opt/tpot/bin to path
myROOTPROMPT='PS1="\[\033[38;5;8m\][\[$(tput sgr0)\]\[\033[38;5;1m\]\u\[$(tput sgr0)\]\[\033[38;5;6m\]@\[$(tput sgr0)\]\[\033[38;5;4m\]\h\[$(tput sgr0)\]\[\033[38;5;6m\]:\[$(tput sgr0)\]\[\033[38;5;5m\]\w\[$(tput sgr0)\]\[\033[38;5;8m\]]\[$(tput sgr0)\]\[\033[38;5;1m\]\\$\[$(tput sgr0)\]\[\033[38;5;15m\] \[$(tput sgr0)\]"'
myUSERPROMPT='PS1="\[\033[38;5;8m\][\[$(tput sgr0)\]\[\033[38;5;2m\]\u\[$(tput sgr0)\]\[\033[38;5;6m\]@\[$(tput sgr0)\]\[\033[38;5;4m\]\h\[$(tput sgr0)\]\[\033[38;5;6m\]:\[$(tput sgr0)\]\[\033[38;5;5m\]\w\[$(tput sgr0)\]\[\033[38;5;8m\]]\[$(tput sgr0)\]\[\033[38;5;2m\]\\$\[$(tput sgr0)\]\[\033[38;5;15m\] \[$(tput sgr0)\]"'
tee -a /root/.bashrc 2>&1>/dev/null <<EOF
$myROOTPROMPT
PATH="$PATH:/opt/tpot/bin"
EOF
tee -a /home/tsec/.bashrc 2>&1>/dev/null <<EOF
$myUSERPROMPT
PATH="$PATH:/opt/tpot/bin"
EOF

# Let's create ews.ip before reboot and prevent race condition for first start
/opt/tpot/bin/updateip.sh 2>&1>/dev/null

# Final steps
cp /opt/tpot/host/etc/rc.local /etc/rc.local 2>&1>/dev/null && \
rm -rf /root/installer 2>&1>/dev/null && \
dialog --no-ok --no-cancel --backtitle "$myBACKTITLE" --title "[ Thanks for your patience. Now rebooting. ]" --pause "" 6 80 2 && \
reboot
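After the final reboot SSH only listens on the port patched in above, so a first login would look roughly like this (host address is a placeholder):
  ssh -p 64295 tsec@<host-ip>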
1
iso/installer/keys/authorized_keys
Normal file
@ -0,0 +1 @@
(file contains a single blank line)
2
iso/installer/rc.local.install
Executable file
@ -0,0 +1,2 @@
#!/bin/bash
openvt -w -s /root/installer/install.sh
7
iso/isolinux/txt.cfg
Executable file
@ -0,0 +1,7 @@
default install
label install
  menu label ^T-Pot 17.10 (Alpha)
  menu default
  kernel linux
  append vga=788 initrd=initrd.gz console-setup/ask_detect=true --
#append vga=788 initrd=initrd.gz console-setup/ask_detect=true DEBCONF_DEBUG=developer
125
iso/preseed/tpot.seed
Executable file
@ -0,0 +1,125 @@
##############################################
### T-Pot Preseed Configuration File by mo ###
##############################################

####################
### Locale Selection
####################
#d-i debian-installer/country string DE
d-i debian-installer/language string en
d-i debian-installer/locale string en_US.UTF-8
d-i localechooser/preferred-locale string en_US.UTF-8

######################
### Keyboard Selection
######################
#d-i console-setup/ask_detect boolean true
#d-i keyboard-configuration/layoutcode string de
d-i console-setup/detected note

#############################
### Unmount Active Partitions
#############################
#d-i preseed/early_command string umount /media || :

#########################
### Network Configuration
#########################
#d-i netcfg/choose_interface select auto
#d-i netcfg/dhcp_timeout string 60
d-i netcfg/get_hostname string t-pot

###############
### Disk Layout
###############
d-i partman/early_command string \
  debconf-set partman-auto/disk $(parted_devices | sort -k2nr | head -1 | cut -f1)

d-i partman-auto/method string regular
d-i partman-lvm/device_remove_lvm boolean true
d-i partman-md/device_remove_md boolean true
d-i partman-auto/choose_recipe select atomic
d-i partman-auto/expert_recipe string \
  root :: \
    8192 8888 8192 linux-swap \
      $primary{ } \
      method{ swap } format{ } \
    . \
    40960 44444 -1 ext4 \
      $primary{ } $bootable{ } \
      method{ format } format{ } \
      use_filesystem{ } filesystem{ ext4 } \
      mountpoint{ / } \
    .
d-i partman-partitioning/confirm_write_new_label boolean true
d-i partman/choose_partition select finish
d-i partman/confirm boolean true
d-i partman/confirm_nooverwrite boolean true

######################
### User Configuration
######################
d-i passwd/root-login boolean false
d-i passwd/make-user boolean true
d-i passwd/user-fullname string tsec
d-i passwd/username string tsec
d-i passwd/user-password-crypted password $1$jAw1TW8v$a2WFamxQJfpPYZmn4qJT71
d-i user-setup/encrypt-home boolean false

########################################
### Country Mirror & Proxy Configuration
########################################
d-i mirror/country string manual
d-i mirror/http/hostname string archive.ubuntu.com
d-i mirror/http/directory string /ubuntu
d-i mirror/http/proxy string

###########################
### Skip Grub Configuration
###########################
#d-i grub-installer/confirm boolean true
#d-i grub-installer/only_debian boolean true
#d-i grub-installer/with_other_os boolean true
d-i grub-installer/skip boolean true
d-i lilo-installer/skip boolean true

######################
### Time Configuration
######################
#d-i time/zone string Europe/Berlin
d-i clock-setup/utc boolean true
d-i time/zone string UTC
d-i clock-setup/ntp boolean true
d-i clock-setup/ntp-server string ntp.ubuntu.com

##################
### Package Groups
##################
tasksel tasksel/first multiselect ubuntu-server

########################
### Package Installation
########################
d-i pkgsel/include string apache2-utils apparmor apt-transport-https aufs-tools bash-completion build-essential ca-certificates cgroupfs-mount curl dialog dnsutils docker.io dstat ethtool genisoimage git glances html2text htop iptables iw jq libcrack2 libltdl7 lm-sensors man nginx-extras nodejs npm ntp openssh-server openssl prips syslinux psmisc pv python-pip unzip vim wireless-tools wpasupplicant

#################
### Update Policy
#################
d-i pkgsel/update-policy select unattended-upgrades

#########################################
### Post install (Grub & T-Pot Installer)
#########################################
d-i preseed/late_command string \
  in-target apt-get -y install grub-pc; \
  in-target grub-install --force $(debconf-get partman-auto/disk); \
  in-target update-grub; \
  cp /opt/installer/rc.local.install /target/etc/rc.local; \
  cp /opt/installer -R /target/root/;

##########
### Reboot
##########
d-i nobootloader/confirmation_common note
d-i finish-install/reboot_in_progress note
d-i cdrom-detect/eject boolean true
26
makeiso.sh
@ -12,15 +12,15 @@ myUBUNTULINK="http://archive.ubuntu.com/ubuntu/dists/xenial-updates/main/install
 myUBUNTUISO="mini.iso"
 myTPOTISO="tpot.iso"
 myTPOTDIR="tpotiso"
-myTPOTSEED="preseed/tpot.seed"
+myTPOTSEED="iso/preseed/tpot.seed"
 myPACKAGES="dialog genisoimage syslinux syslinux-utils pv udisks2"
-myAUTHKEYSPATH="installer/keys/authorized_keys"
-myPFXPATH="installer/keys/8021x.pfx"
-myPFXPWPATH="installer/keys/8021x.pw"
-myPFXHOSTIDPATH="installer/keys/8021x.id"
-myINSTALLERPATH="installer/install.sh"
-myPROXYCONFIG="installer/etc/proxy"
-myNTPCONFPATH="installer/etc/ntp"
+myAUTHKEYSPATH="iso/installer/keys/authorized_keys"
+myPFXPATH="iso/installer/keys/8021x.pfx"
+myPFXPWPATH="iso/installer/keys/8021x.pw"
+myPFXHOSTIDPATH="iso/installer/keys/8021x.id"
+myINSTALLERPATH="iso/installer/install.sh"
+myPROXYCONFIG="iso/installer/proxy"
+myNTPCONFPATH="iso/installer/ntp"
 myTMP="tmp"
 
 # Got root?
@ -33,7 +33,7 @@ if [ "$myWHOAMI" != "root" ]
 fi
 
 # Let's load dialog color theme
-cp installer/etc/dialogrc /etc/
+cp host/etc/dialogrc /etc/
 
 # Let's clean up at the end or if something goes wrong ...
 function fuCLEANUP {
@ -228,10 +228,10 @@ rm initrd
 cd ..
 
 # Let's add the files for the automated install
-mkdir -p $myTPOTDIR/tmp/opt/tpot
-cp installer/* -R $myTPOTDIR/tmp/opt/tpot/
-cp isolinux/* $myTPOTDIR/
-cp preseed/tpot.seed $myTPOTDIR/tmp/preseed.cfg
+mkdir -p $myTPOTDIR/tmp/opt/
+cp iso/installer -R $myTPOTDIR/tmp/opt/
+cp iso/isolinux/* $myTPOTDIR/
+cp iso/preseed/tpot.seed $myTPOTDIR/tmp/preseed.cfg
 
 # Let's create the new initrd
 cd $myTPOTDIR/tmp
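With the paths updated to the new iso/ layout, the image is still built the same way from the repository root; a sketch, the script checks for root itself:
  sudo ./makeiso.sh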