manage kibana objects, ES dump and restore, ES folder backup

This commit is contained in:
Marco Ochse 2017-04-14 22:08:35 +00:00
parent 90592e7388
commit c9827f0f03
9 changed files with 202 additions and 131 deletions

View file

@@ -1,60 +0,0 @@
#!/bin/bash
########################################################
# T-Pot #
# ELK DB backup script #
# #
# v16.10.0 by mo, DTAG, 2016-05-12 #
########################################################
myCOUNT=1
myDATE=$(date +%Y%m%d%H%M)
myELKPATH="/data/elk/"
myBACKUPPATH="/data/"
# Make sure not to interrupt a check
while true
do
if ! [ -a /var/run/check.lock ];
then break
fi
sleep 0.1
if [ "$myCOUNT" = "1" ];
then
echo -n "Waiting for services "
else echo -n .
fi
if [ "$myCOUNT" = "6000" ];
then
echo
echo "Overriding check.lock"
rm /var/run/check.lock
break
fi
myCOUNT=$((myCOUNT + 1))
done
# We do not want to get interrupted by a check
touch /var/run/check.lock
# Stop ELK to lift db lock
echo "Now stopping ELK ..."
systemctl stop elk
sleep 10
# Backup DB in 2 flavors
echo "Now backing up Elasticsearch data ..."
tar cvfz $myBACKUPPATH"$myDATE"_elkall.tgz $myELKPATH
rm -rf "$myELKPATH"log/*
rm -rf "$myELKPATH"data/tpotcluster/nodes/0/indices/logstash*
tar cvfz $myBACKUPPATH"$myDATE"_elkbase.tgz $myELKPATH
rm -rf $myELKPATH
tar xvfz $myBACKUPPATH"$myDATE"_elkall.tgz -C /
chmod 760 -R $myELKPATH
chown tpot:tpot -R $myELKPATH
# Start ELK
systemctl start elk
echo "Now starting up ELK ..."
# Allow checks to resume
rm /var/run/check.lock
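Both the old script above and its replacement below busy-wait on /var/run/check.lock before touching the database. A minimal sketch of that pattern as a reusable function, not part of this commit (the name fuWAIT is hypothetical; the 6000 x 0.1 s timeout, roughly ten minutes, mirrors the scripts):

#!/bin/bash
# Sketch only: the check.lock wait loop from the scripts, wrapped in a function.
function fuWAIT {
local myCOUNT=1
while [ -e /var/run/check.lock ];
do
if [ "$myCOUNT" = "1" ];
then echo -n "Waiting for services "
else echo -n .
fi
if [ "$myCOUNT" = "6000" ];
then
echo
echo "Overriding check.lock"
rm /var/run/check.lock
break
fi
sleep 0.1
myCOUNT=$((myCOUNT + 1))
done
}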

View file

@@ -0,0 +1,64 @@
#!/bin/bash
# Make sure ES is available
myES="http://127.0.0.1:64298/"
myESSTATUS=$(curl -s -XGET ''$myES'_cluster/health' | jq '.' | grep -c green)
if ! [ "$myESSTATUS" = "1" ]
then
echo "### Elasticsearch is not available, try starting via 'systemctl start elk'."
exit
else
echo "### Elasticsearch is available, now continuing."
echo
fi
# Set vars
myCOUNT=1
myDATE=$(date +%Y%m%d%H%M)
myELKPATH="/data/elk/data"
myKIBANAINDEXNAME=$(curl -s -XGET ''$myES'_cat/indices/' | grep .kibana | awk '{ print $4 }')
myKIBANAINDEXPATH=$myELKPATH/nodes/0/indices/$myKIBANAINDEXNAME
# Let's ensure normal operation on exit or if interrupted ...
function fuCLEANUP {
### Start ELK
systemctl start elk
echo "### Now starting up ELK ..."
### Allow checks to resume
rm -rf /var/run/check.lock
}
trap fuCLEANUP EXIT
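# Note: bash also runs the EXIT trap on interrupts, so ELK is restarted and the lock removed even if the backup aborts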
# Make sure not to interrupt a check
while true
do
if ! [ -a /var/run/check.lock ];
then break
fi
sleep 0.1
if [ "$myCOUNT" = "1" ];
then
echo -n "### Waiting for services "
else echo -n .
fi
if [ "$myCOUNT" = "6000" ];
then
echo
echo "### Overriding check.lock"
rm /var/run/check.lock
break
fi
myCOUNT=$((myCOUNT + 1))
done
# We do not want to get interrupted by a check
touch /var/run/check.lock
# Stop ELK to lift db lock
echo "### Now stopping ELK ..."
systemctl stop elk
sleep 10
# Backup DB in 2 flavors
echo "### Now backing up Elasticsearch folders ..."
tar cvfz "elkall_"$myDATE".tgz" $myELKPATH
tar cvfz "elkbase_"$myDATE".tgz" $myKIBANAINDEXPATH

View file

@@ -1,21 +0,0 @@
#!/bin/bash
myDATE=$(date +%Y%m%d%H%M)
myINDICES=$(curl -s -XGET 'http://127.0.0.1:64298/_cat/indices/' | grep logstash | awk '{ print $3 }' | sort | grep -v 1970)
myES="http://127.0.0.1:64298/"
myCOL1=""
myCOL0=""
mkdir $myDATE
for i in $myINDICES;
do
echo $myCOL1"### Now dumping: "$i $myCOL0
elasticdump --input=$myES$i --output=$myDATE"/"$i --limit 7500
echo $myCOL1"### Now compressing: $myDATE/$i" $myCOL0
gzip -f $myDATE"/"$i
done;
echo $myCOL1"### Now building tar archive: es_dump_"$myDATE".tgz" $myCOL0
cd $myDATE
tar cvfz es_dump_$myDATE.tgz *
mv es_dump_$myDATE.tgz ..
cd ..
rm -rf $myDATE
echo $myCOL1"### Done."$myCOL0

installer/bin/dump_es.sh (executable file, 44 additions)
View file

@@ -0,0 +1,44 @@
#!/bin/bash
# Make sure ES is available
myES="http://127.0.0.1:64298/"
myESSTATUS=$(curl -s -XGET ''$myES'_cluster/health' | jq '.' | grep -c green)
if ! [ "$myESSTATUS" = "1" ]
then
echo "### Elasticsearch is not available, try starting via 'systemctl start elk'."
exit
else
echo "### Elasticsearch is available, now continuing."
echo
fi
# Let's ensure normal operation on exit or if interrupted ...
function fuCLEANUP {
rm -rf tmp
}
trap fuCLEANUP EXIT
# Set vars
myDATE=$(date +%Y%m%d%H%M)
myINDICES=$(curl -s -XGET ''$myES'_cat/indices/' | grep logstash | awk '{ print $3 }' | sort | grep -v 1970)
myES="http://127.0.0.1:64298/"
myCOL1=""
myCOL0=""
# Dumping all ES data
echo $myCOL1"### The following indices will be dumped: "$myCOL0
echo $myINDICES
echo
mkdir tmp
for i in $myINDICES;
do
echo $myCOL1"### Now dumping: "$i $myCOL0
elasticdump --input=$myES$i --output="tmp/"$i --limit 7500
echo $myCOL1"### Now compressing: tmp/$i" $myCOL0
gzip -f "tmp/"$i
done;
# Build tar archive
echo $myCOL1"### Now building tar archive: es_dump_"$myDATE".tgz" $myCOL0
tar cvf es_dump_$myDATE.tar tmp/*
echo $myCOL1"### Done."$myCOL0

View file

@@ -1,6 +1,18 @@
#!/bin/bash
# Make sure ES is available
myES="http://127.0.0.1:64298/"
myESSTATUS=$(curl -s -XGET ''$myES'_cluster/health' | jq '.' | grep -c green)
if ! [ "$myESSTATUS" = "1" ]
then
echo "### Elasticsearch is not available, try starting via 'systemctl start elk'."
exit
else
echo "### Elasticsearch is available, now continuing."
echo
fi
# Set vars
myDATE=$(date +%Y%m%d%H%M)
myINDEXCOUNT=$(curl -s -XGET ''$myES'.kibana/index-pattern/logstash-*' | tr '\\' '\n' | grep "scripted" | wc -w)
myDASHBOARDS=$(curl -s -XGET ''$myES'.kibana/dashboard/_search?filter_path=hits.hits._id&pretty&size=10000' | jq '.hits.hits[] | {_id}' | jq -r '._id')
myVISUALIZATIONS=$(curl -s -XGET ''$myES'.kibana/visualization/_search?filter_path=hits.hits._id&pretty&size=10000' | jq '.hits.hits[] | {_id}' | jq -r '._id')
@@ -8,15 +20,21 @@ mySEARCHES=$(curl -s -XGET ''$myES'.kibana/search/_search?filter_path=hits.hits.
myCOL1=""
myCOL0=""
# Let's ensure normal operation on exit or if interrupted ...
function fuCLEANUP {
rm -rf patterns/ dashboards/ visualizations/ searches/
}
trap fuCLEANUP EXIT
# Export index patterns
mkdir -p patterns
echo $myCOL1"### Now dumping"$myCOL0 $myINDEXCOUNT $myCOL1"index patterns." $myCOL0
echo $myCOL1"### Now exporting"$myCOL0 $myINDEXCOUNT $myCOL1"index patterns." $myCOL0
curl -s -XGET ''$myES'.kibana/index-pattern/logstash-*?' | jq '._source' > patterns/index-patterns.json
echo
# Export dashboards
mkdir -p dashboards
echo $myCOL1"### Now dumping"$myCOL0 $(echo $myDASHBOARDS | wc -w) $myCOL1"dashboards." $myCOL0
echo $myCOL1"### Now exporting"$myCOL0 $(echo $myDASHBOARDS | wc -w) $myCOL1"dashboards." $myCOL0
for i in $myDASHBOARDS;
do
echo $myCOL1"###### "$i $myCOL0
@@ -26,7 +44,7 @@ echo
# Export visualizations
mkdir -p visualizations
echo $myCOL1"### Now dumping"$myCOL0 $(echo $myVISUALIZATIONS | wc -w) $myCOL1"visualizations." $myCOL0
echo $myCOL1"### Now exporting"$myCOL0 $(echo $myVISUALIZATIONS | wc -w) $myCOL1"visualizations." $myCOL0
for i in $myVISUALIZATIONS;
do
echo $myCOL1"###### "$i $myCOL0
@@ -36,7 +54,7 @@ echo
# Export searches
mkdir -p searches
echo $myCOL1"### Now dumping"$myCOL0 $(echo $mySEARCHES | wc -w) $myCOL1"searches." $myCOL0
echo $myCOL1"### Now exporting"$myCOL0 $(echo $mySEARCHES | wc -w) $myCOL1"searches." $myCOL0
for i in $mySEARCHES;
do
echo $myCOL1"###### "$i $myCOL0
@@ -44,19 +62,15 @@ for i in $mySEARCHES;
done;
echo
# Building tar archive
echo $myCOL1"### Now building archive"$myCOL0 "kibana-objects_"$myDATE".tgz"
tar cvfz kibana-objects_$myDATE.tgz patterns dashboards visualizations searches > /dev/null
# Stats
echo
echo $myCOL1"### Statistics"
echo $myCOL1"###### Dumped"$myCOL0 $myINDEXCOUNT $myCOL1"index patterns." $myCOL0
echo $myCOL1"###### Dumped"$myCOL0 $(echo $myDASHBOARDS | wc -w) $myCOL1"dashboards." $myCOL0
echo $myCOL1"###### Dumped"$myCOL0 $(echo $myVISUALIZATIONS | wc -w) $myCOL1"visualizations." $myCOL0
echo $myCOL1"###### Dumped"$myCOL0 $(echo $mySEARCHES | wc -w) $myCOL1"searches." $myCOL0
echo $myCOL1"###### Exported"$myCOL0 $myINDEXCOUNT $myCOL1"index patterns." $myCOL0
echo $myCOL1"###### Exported"$myCOL0 $(echo $myDASHBOARDS | wc -w) $myCOL1"dashboards." $myCOL0
echo $myCOL1"###### Exported"$myCOL0 $(echo $myVISUALIZATIONS | wc -w) $myCOL1"visualizations." $myCOL0
echo $myCOL1"###### Exported"$myCOL0 $(echo $mySEARCHES | wc -w) $myCOL1"searches." $myCOL0
echo
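To check what an export produced, listing the archive is enough (the file name follows the kibana-objects_<timestamp>.tgz pattern above; the timestamp here is an example):

tar tvfz kibana-objects_201704142208.tgz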

View file

@@ -1,21 +1,39 @@
#!/bin/bash
# Make sure ES is available
myES="http://127.0.0.1:64298/"
myESSTATUS=$(curl -s -XGET ''$myES'_cluster/health' | jq '.' | grep -c green)
if ! [ "$myESSTATUS" = "1" ]
then
echo "### Elasticsearch is not available, try starting via 'systemctl start elk'."
exit
else
echo "### Elasticsearch is available, now continuing."
echo
fi
# Set vars
myDUMP=$1
myCOL1=""
myCOL0=""
# Let's ensure normal operation on exit or if interrupted ...
function fuCLEANUP {
rm -rf patterns/ dashboards/ visualizations/ searches/
}
trap fuCLEANUP EXIT
# Check if parameter is given and file exists
if [ "$myDUMP" = "" ];
then
echo $myCOL1"### Please provide a backup file name."$myCOL0
echo $myCOL1"### restore-kibana-objects.sh <kibana-objects.tgz>"$myCOL0
echo
exit
fi
if ! [ -a $myDUMP ];
then
echo $myCOL1"### File not found."$myCOL0
exit
fi
# Unpack tar
@@ -23,14 +41,14 @@ tar xvfz $myDUMP > /dev/null
# Restore index patterns
myINDEXCOUNT=$(cat patterns/index-patterns.json | tr '\\' '\n' | grep "scripted" | wc -w)
echo $myCOL1"### Now restoring"$myCOL0 $myINDEXCOUNT $myCOL1"index patterns." $myCOL0
echo $myCOL1"### Now importing"$myCOL0 $myINDEXCOUNT $myCOL1"index patterns." $myCOL0
curl -s -XDELETE ''$myES'.kibana/index-pattern/logstash-*' > /dev/null
curl -s -XPUT ''$myES'.kibana/index-pattern/logstash-*' -T patterns/index-patterns.json > /dev/null
echo
# Restore dashboards
myDASHBOARDS=$(ls dashboards/*.json | cut -c 12- | rev | cut -c 6- | rev)
echo $myCOL1"### Now importing "$myCOL0$(echo $myDASHBOARDS | wc -w)$myCOL1 "dashboards." $myCOL0
for i in $myDASHBOARDS;
do
echo $myCOL1"###### "$i $myCOL0
@@ -40,8 +58,8 @@ for i in $myDASHBOARDS;
echo
# Restore visualizations
myVISUALIZATIONS=$(ls visualizations/*.json | cut -c 16- | rev | cut -c 6- | rev)
echo $myCOL1"### Now importing "$myCOL0$(echo $myVISUALIZATIONS | wc -w)$myCOL1 "visualizations." $myCOL0
for i in $myVISUALIZATIONS;
do
echo $myCOL1"###### "$i $myCOL0
@@ -51,8 +69,8 @@ for i in $myVISUALIZATIONS;
echo
# Restore searches
mySEARCHES=$(ls searches/*.json | cut -c 10- | rev | cut -c 6- | rev)
echo $myCOL1"### Now importing "$myCOL0$(echo $mySEARCHES | wc -w)$myCOL1 "searches." $myCOL0
for i in $mySEARCHES;
do
echo $myCOL1"###### "$i $myCOL0
@@ -61,15 +79,12 @@ for i in $mySEARCHES;
done;
echo
# Stats
echo
echo $myCOL1"### Statistics"
echo $myCOL1"###### Restored"$myCOL0 $myINDEXCOUNT $myCOL1"index patterns." $myCOL0
echo $myCOL1"###### Restored"$myCOL0 $(echo $myDASHBOARDS | wc -w) $myCOL1"dashboards." $myCOL0
echo $myCOL1"###### Restored"$myCOL0 $(echo $myVISUALIZATIONS | wc -w) $myCOL1"visualizations." $myCOL0
echo $myCOL1"###### Restored"$myCOL0 $(echo $mySEARCHES | wc -w) $myCOL1"searches." $myCOL0
echo $myCOL1"###### Imported"$myCOL0 $myINDEXCOUNT $myCOL1"index patterns." $myCOL0
echo $myCOL1"###### Imported"$myCOL0 $(echo $myDASHBOARDS | wc -w) $myCOL1"dashboards." $myCOL0
echo $myCOL1"###### Imported"$myCOL0 $(echo $myVISUALIZATIONS | wc -w) $myCOL1"visualizations." $myCOL0
echo $myCOL1"###### Imported"$myCOL0 $(echo $mySEARCHES | wc -w) $myCOL1"searches." $myCOL0
echo
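Per the usage message above (example timestamp), an import plus a quick sanity check against the .kibana index could look like:

./restore-kibana-objects.sh kibana-objects_201704142208.tgz
curl -s -XGET 'http://127.0.0.1:64298/.kibana/dashboard/_search?size=0' | jq '.hits.total'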

View file

@@ -1,6 +1,23 @@
#!/bin/bash
# Make sure ES is available
myES="http://127.0.0.1:64298/"
myESSTATUS=$(curl -s -XGET ''$myES'_cluster/health' | jq '.' | grep -c green)
if ! [ "$myESSTATUS" = "1" ]
then
echo "### Elasticsearch is not available, try starting via 'systemctl start elk'."
exit
else
echo "### Elasticsearch is available, now continuing."
fi
# Let's ensure normal operation on exit or if interrupted ...
function fuCLEANUP {
rm -rf tmp
}
trap fuCLEANUP EXIT
# Set vars
myDUMP=$1
myCOL1=""
myCOL0=""
@@ -8,7 +25,7 @@ myCOL0=""
if [ "$myDUMP" = "" ];
then
echo $myCOL1"### Please provide a backup file name."$myCOL0
echo $myCOL1"### restore-elk.sh <es_dump.tgz>"$myCOL0
echo $myCOL1"### restore-elk.sh <es_dump.tar>"$myCOL0
echo
exit
fi
@@ -20,26 +37,24 @@ fi
# Unpack tar archive
echo $myCOL1"### Now unpacking tar archive: "$myDUMP $myCOL0
mkdir tmp
tar xvf $myDUMP
# Build indices list
myINDICES=$(ls tmp/logstash*.gz | cut -c 5- | rev | cut -c 4- | rev)
echo $myCOL1"### The following indices will be restored: "$myCOL0
echo $myINDICES
echo
# Restore indices
for i in $myINDICES;
do
# Delete index if it already exists
curl -s -XDELETE $myES$i > /dev/null
echo $myCOL1"### Now uncompressing: "$i".gz" $myCOL0
gunzip $i.gz
echo $myCOL1"### Now uncompressing: tmp/$i.gz" $myCOL0
gunzip -f tmp/$i.gz
# Restore index to ES
echo $myCOL1"### Now restoring: "$i $myCOL0
elasticdump --input=tmp/$i --output=$myES$i --limit 7500
rm tmp/$i
done;
echo $myCOL1"### Done."$myCOL0

Binary file not shown.

Binary file not shown.